corpus_id | paper_id | title | abstract | source | bibtex | citation_key
---|---|---|---|---|---|---|
arxiv-668201 | 2410.08115 | Optima: Optimizing Effectiveness and Efficiency for LLM-Based Multi-Agent System | <|reference_start|>Optima: Optimizing Effectiveness and Efficiency for LLM-Based Multi-Agent System: Large Language Model (LLM) based multi-agent systems (MAS) show remarkable potential in collaborative problem-solving, yet they still face critical challenges: low communication efficiency, poor scalability, and a lack of effective parameter-updating optimization methods. We present Optima, a novel framework that addresses these issues by significantly enhancing both communication efficiency and task effectiveness in LLM-based MAS through LLM training. Optima employs an iterative generate, rank, select, and train paradigm with a reward function balancing task performance, token efficiency, and communication readability. We explore various RL algorithms, including Supervised Fine-Tuning, Direct Preference Optimization, and their hybrid approaches, providing insights into their effectiveness-efficiency trade-offs. We integrate Monte Carlo Tree Search-inspired techniques for DPO data generation, treating conversation turns as tree nodes to explore diverse interaction paths. Evaluated on common multi-agent tasks, including information-asymmetric question answering and complex reasoning, Optima shows consistent and substantial improvements over single-agent baselines and vanilla MAS based on Llama 3 8B, achieving up to 2.8x performance gain with less than 10\% tokens on tasks requiring heavy information exchange. Moreover, Optima's efficiency gains open new possibilities for leveraging inference-compute more effectively, leading to improved inference-time scaling laws. By addressing fundamental challenges in LLM-based MAS, Optima shows the potential towards scalable, efficient, and effective MAS (https://chenweize1998.github.io/optima-project-page).<|reference_end|> | arxiv | @article{chen2024optima:,
title={Optima: Optimizing Effectiveness and Efficiency for LLM-Based
Multi-Agent System},
author={Weize Chen, Jiarui Yuan, Chen Qian, Cheng Yang, Zhiyuan Liu, Maosong
Sun},
journal={arXiv preprint arXiv:2410.08115},
year={2024},
archivePrefix={arXiv},
eprint={2410.08115},
primaryClass={cs.CL cs.AI}
} | chen2024optima: |
arxiv-668202 | 2410.08117 | On Barycenter Computation: Semi-Unbalanced Optimal Transport-based Method on Gaussians | <|reference_start|>On Barycenter Computation: Semi-Unbalanced Optimal Transport-based Method on Gaussians: We explore a robust version of the barycenter problem among $n$ centered Gaussian probability measures, termed Semi-Unbalanced Optimal Transport (SUOT)-based Barycenter, wherein the barycenter remains fixed while the others are relaxed using Kullback-Leibler divergence. We develop optimization algorithms on Bures-Wasserstein manifold, named the Exact Geodesic Gradient Descent and Hybrid Gradient Descent algorithms. While the Exact Geodesic Gradient Descent method is based on computing the exact closed form of the first-order derivative of the objective function of the barycenter along a geodesic on the Bures manifold, the Hybrid Gradient Descent method utilizes optimizer components when solving the SUOT problem to replace outlier measures before applying the Riemannian Gradient Descent. We establish the theoretical convergence guarantees for both methods and demonstrate that the Exact Geodesic Gradient Descent algorithm attains a dimension-free convergence rate. Finally, we conduct experiments to compare the normal Wasserstein Barycenter with ours and perform an ablation study.<|reference_end|> | arxiv | @article{nguyen2024on,
title={On Barycenter Computation: Semi-Unbalanced Optimal Transport-based
Method on Gaussians},
author={Ngoc-Hai Nguyen, Dung Le, Hoang-Phi Nguyen, Tung Pham, Nhat Ho},
journal={arXiv preprint arXiv:2410.08117},
year={2024},
archivePrefix={arXiv},
eprint={2410.08117},
primaryClass={cs.LG}
} | nguyen2024on |
arxiv-668203 | 2410.08118 | Medical Image Quality Assessment based on Probability of Necessity and Sufficiency | <|reference_start|>Medical Image Quality Assessment based on Probability of Necessity and Sufficiency: Medical image quality assessment (MIQA) is essential for reliable medical image analysis. While deep learning has shown promise in this field, current models could be misled by spurious correlations learned from data and struggle with out-of-distribution (OOD) scenarios. To that end, we propose an MIQA framework based on a concept from causal inference: Probability of Necessity and Sufficiency (PNS). PNS measures how likely a set of features is to be both necessary (always present for an outcome) and sufficient (capable of guaranteeing an outcome) for a particular result. Our approach leverages this concept by learning hidden features from medical images with high PNS values for quality prediction. This encourages models to capture more essential predictive information, enhancing their robustness to OOD scenarios. We evaluate our framework on an Anterior Segment Optical Coherence Tomography (AS-OCT) dataset for the MIQA task and experimental results demonstrate the effectiveness of our framework.<|reference_end|> | arxiv | @article{chen2024medical,
title={Medical Image Quality Assessment based on Probability of Necessity and
Sufficiency},
author={Boyu Chen, Ameenat L. Solebo, Weiye Bao, Paul Taylor},
journal={arXiv preprint arXiv:2410.08118},
year={2024},
archivePrefix={arXiv},
eprint={2410.08118},
primaryClass={cs.CV}
} | chen2024medical |
arxiv-668204 | 2410.08119 | Q-VLM: Post-training Quantization for Large Vision-Language Models | <|reference_start|>Q-VLM: Post-training Quantization for Large Vision-Language Models: In this paper, we propose a post-training quantization framework of large vision-language models (LVLMs) for efficient multi-modal inference. Conventional quantization methods sequentially search the layer-wise rounding functions by minimizing activation discretization errors, which fails to acquire optimal quantization strategy without considering cross-layer dependency. On the contrary, we mine the cross-layer dependency that significantly influences discretization errors of the entire vision-language model, and embed this dependency into optimal quantization strategy searching with low search cost. Specifically, we observe the strong correlation between the activation entropy and the cross-layer dependency concerning output discretization errors. Therefore, we employ the entropy as the proxy to partition blocks optimally, which aims to achieve satisfying trade-offs between discretization errors and the search cost. Moreover, we optimize the visual encoder to disentangle the cross-layer dependency for fine-grained decomposition of search space, so that the search cost is further reduced without harming the quantization accuracy. Experimental results demonstrate that our method compresses the memory by 2.78x and increase generate speed by 1.44x about 13B LLaVA model without performance degradation on diverse multi-modal reasoning tasks. Code is available at https://github.com/ChangyuanWang17/QVLM.<|reference_end|> | arxiv | @article{wang2024q-vlm:,
title={Q-VLM: Post-training Quantization for Large Vision-Language Models},
author={Changyuan Wang, Ziwei Wang, Xiuwei Xu, Yansong Tang, Jie Zhou, Jiwen
Lu},
journal={arXiv preprint arXiv:2410.08119},
year={2024},
archivePrefix={arXiv},
eprint={2410.08119},
primaryClass={cs.CV}
} | wang2024q-vlm: |
arxiv-668205 | 2410.08120 | CCA-Secure Key-Aggregate Proxy Re-Encryption for Secure Cloud Storage | <|reference_start|>CCA-Secure Key-Aggregate Proxy Re-Encryption for Secure Cloud Storage: The development of cloud services in recent years has mushroomed, for example, Google Drive, Amazon AWS, Microsoft Azure. Merchants can easily use cloud services to open their online shops in a few seconds. Users can easily and quickly connect to the cloud in their own portable devices, and access their personal information effortlessly. Because users store large amounts of data on third-party devices, ensuring data confidentiality, availability and integrity become especially important. Therefore, data protection in cloud storage is the key to the survival of the cloud industry. Fortunately, Proxy Re-Encryption schemes enable users to convert their ciphertext into others ciphertext by using a re-encryption key. This method gracefully transforms the users computational cost to the server. In addition, with C-PREs, users can apply their access control right on the encrypted data. Recently, we lowered the key storage cost of C-PREs to constant size and proposed the first Key-Aggregate Proxy Re-Encryption scheme. In this paper, we further prove that our scheme is a CCA-secure Key-Aggregate Proxy Re-Encryption scheme in the adaptive model without using random oracle. Moreover, we also implement and analyze the Key Aggregate PRE application in the real world scenario.<|reference_end|> | arxiv | @article{chen2024cca-secure,
title={CCA-Secure Key-Aggregate Proxy Re-Encryption for Secure Cloud Storage},
author={Wei-Hao Chen and Chun-I Fan and Yi-Fan Tseng},
journal={arXiv preprint arXiv:2410.08120},
year={2024},
archivePrefix={arXiv},
eprint={2410.08120},
primaryClass={cs.CR}
} | chen2024cca-secure |
arxiv-668206 | 2410.08121 | Heterogeneous Graph Auto-Encoder for CreditCard Fraud Detection | <|reference_start|>Heterogeneous Graph Auto-Encoder for CreditCard Fraud Detection: The digital revolution has significantly impacted financial transactions, leading to a notable increase in credit card usage. However, this convenience comes with a trade-off: a substantial rise in fraudulent activities. Traditional machine learning methods for fraud detection often struggle to capture the inherent interconnectedness within financial data. This paper proposes a novel approach for credit card fraud detection that leverages Graph Neural Networks (GNNs) with attention mechanisms applied to heterogeneous graph representations of financial data. Unlike homogeneous graphs, heterogeneous graphs capture intricate relationships between various entities in the financial ecosystem, such as cardholders, merchants, and transactions, providing a richer and more comprehensive data representation for fraud analysis. To address the inherent class imbalance in fraud data, where genuine transactions significantly outnumber fraudulent ones, the proposed approach integrates an autoencoder. This autoencoder, trained on genuine transactions, learns a latent representation and flags deviations during reconstruction as potential fraud. This research investigates two key questions: (1) How effectively can a GNN with an attention mechanism detect and prevent credit card fraud when applied to a heterogeneous graph? (2) How does the efficacy of the autoencoder with attention approach compare to traditional methods? The results are promising, demonstrating that the proposed model outperforms benchmark algorithms such as Graph Sage and FI-GRL, achieving a superior AUC-PR of 0.89 and an F1-score of 0.81. This research significantly advances fraud detection systems and the overall security of financial transactions by leveraging GNNs with attention mechanisms and addressing class imbalance through an autoencoder.<|reference_end|> | arxiv | @article{singh2024heterogeneous,
title={Heterogeneous Graph Auto-Encoder for CreditCard Fraud Detection},
author={Moirangthem Tiken Singh, Rabinder Kumar Prasad, Gurumayum Robert
Michael, N K Kaphungkui, N.Hemarjit Singh},
journal={arXiv preprint arXiv:2410.08121},
year={2024},
archivePrefix={arXiv},
eprint={2410.08121},
primaryClass={cs.LG cs.AI}
} | singh2024heterogeneous |
arxiv-668207 | 2410.08122 | PP-GWAS: Privacy Preserving Multi-Site Genome-wide Association Studies | <|reference_start|>PP-GWAS: Privacy Preserving Multi-Site Genome-wide Association Studies: Genome-wide association studies are pivotal in understanding the genetic underpinnings of complex traits and diseases. Collaborative, multi-site GWAS aim to enhance statistical power but face obstacles due to the sensitive nature of genomic data sharing. Current state-of-the-art methods provide a privacy-focused approach utilizing computationally expensive methods such as Secure Multi-Party Computation and Homomorphic Encryption. In this context, we present a novel algorithm PP-GWAS designed to improve upon existing standards in terms of computational efficiency and scalability without sacrificing data privacy. This algorithm employs randomized encoding within a distributed architecture to perform stacked ridge regression on a Linear Mixed Model to ensure rigorous analysis. Experimental evaluation with real world and synthetic data indicates that PP-GWAS can achieve computational speeds twice as fast as similar state-of-the-art algorithms while using lesser computational resources, all while adhering to a robust security model that caters to an all-but-one semi-honest adversary setting. We have assessed its performance using various datasets, emphasizing its potential in facilitating more efficient and private genomic analyses.<|reference_end|> | arxiv | @article{swaminathan2024pp-gwas:,
title={PP-GWAS: Privacy Preserving Multi-Site Genome-wide Association Studies},
author={Arjhun Swaminathan, Anika Hannemann, Ali Burak \"Unal, Nico Pfeifer,
Mete Akg\"un},
journal={arXiv preprint arXiv:2410.08122},
year={2024},
archivePrefix={arXiv},
eprint={2410.08122},
primaryClass={cs.CR}
} | swaminathan2024pp-gwas: |
arxiv-668208 | 2410.08125 | Generalizing Stochastic Smoothing for Differentiation and Gradient Estimation | <|reference_start|>Generalizing Stochastic Smoothing for Differentiation and Gradient Estimation: We deal with the problem of gradient estimation for stochastic differentiable relaxations of algorithms, operators, simulators, and other non-differentiable functions. Stochastic smoothing conventionally perturbs the input of a non-differentiable function with a differentiable density distribution with full support, smoothing it and enabling gradient estimation. Our theory starts at first principles to derive stochastic smoothing with reduced assumptions, without requiring a differentiable density nor full support, and we present a general framework for relaxation and gradient estimation of non-differentiable black-box functions $f:\mathbb{R}^n\to\mathbb{R}^m$. We develop variance reduction for gradient estimation from 3 orthogonal perspectives. Empirically, we benchmark 6 distributions and up to 24 variance reduction strategies for differentiable sorting and ranking, differentiable shortest-paths on graphs, differentiable rendering for pose estimation, as well as differentiable cryo-ET simulations.<|reference_end|> | arxiv | @article{petersen2024generalizing,
title={Generalizing Stochastic Smoothing for Differentiation and Gradient
Estimation},
author={Felix Petersen, Christian Borgelt, Aashwin Mishra, Stefano Ermon},
journal={arXiv preprint arXiv:2410.08125},
year={2024},
archivePrefix={arXiv},
eprint={2410.08125},
primaryClass={cs.LG stat.ML}
} | petersen2024generalizing |
arxiv-668209 | 2410.08126 | Mars: Situated Inductive Reasoning in an Open-World Environment | <|reference_start|>Mars: Situated Inductive Reasoning in an Open-World Environment: Large Language Models (LLMs) trained on massive corpora have shown remarkable success in knowledge-intensive tasks. Yet, most of them rely on pre-stored knowledge. Inducing new general knowledge from a specific environment and performing reasoning with the acquired knowledge -- \textit{situated inductive reasoning}, is crucial and challenging for machine intelligence. In this paper, we design Mars, an interactive environment devised for situated inductive reasoning. It introduces counter-commonsense game mechanisms by modifying terrain, survival setting and task dependency while adhering to certain principles. In Mars, agents need to actively interact with their surroundings, derive useful rules and perform decision-making tasks in specific contexts. We conduct experiments on various RL-based and LLM-based methods, finding that they all struggle on this challenging situated inductive reasoning benchmark. Furthermore, we explore \textit{Induction from Reflection}, where we instruct agents to perform inductive reasoning from history trajectory. The superior performance underscores the importance of inductive reasoning in Mars. Through Mars, we aim to galvanize advancements in situated inductive reasoning and set the stage for developing the next generation of AI systems that can reason in an adaptive and context-sensitive way.<|reference_end|> | arxiv | @article{tang2024mars:,
title={Mars: Situated Inductive Reasoning in an Open-World Environment},
author={Xiaojuan Tang, Jiaqi Li, Yitao Liang, Song-chun Zhu, Muhan Zhang,
Zilong Zheng},
journal={arXiv preprint arXiv:2410.08126},
year={2024},
archivePrefix={arXiv},
eprint={2410.08126},
primaryClass={cs.LG cs.AI cs.CL}
} | tang2024mars: |
arxiv-668210 | 2410.08127 | Aggregation of Antagonistic Contingent Preferences: When Is It Possible? | <|reference_start|>Aggregation of Antagonistic Contingent Preferences: When Is It Possible?: We study a two-alternative voting game where voters' preferences depend on an unobservable world state and each voter receives a private signal correlated to the true world state. We consider the collective decision when voters can collaborate in a group and have antagonistic preferences -- given the revealed world state, voters will support different alternatives. We identify sharp thresholds for the fraction of the majority-type voters necessary for preference aggregation. We specifically examine the majority vote mechanism (where each voter has one vote, and the alternative with more votes wins) and pinpoint a critical threshold, denoted as $\theta_{\texttt{maj}}$, for the majority-type proportion. When the fraction of majority-type voters surpasses $\theta_{\texttt{maj}}$, there is a symmetric strategy for the majority-type that leads to strategic equilibria favoring informed majority decisions. Conversely, when the majority-type proportion falls below $\theta_{\texttt{maj}}$, equilibrium does not exist, rendering the aggregation of informed majority decisions impossible. Additionally, we propose an easy-to-implement mechanism that establishes a lower threshold $\theta^\ast$ (with $\theta^\ast \leq \theta_{\texttt{maj}}$) for both equilibria and informed majority decision aggregation. We demonstrate that $\theta^\ast$ is optimal by proving a general impossibility result: if the majority-type proportion is below $\theta^\ast$, with mild assumptions, no mechanism can aggregate the preferences, meaning that no equilibrium leads to the informed majority decision for any mechanism.<|reference_end|> | arxiv | @article{deng2024aggregation,
title={Aggregation of Antagonistic Contingent Preferences: When Is It Possible?},
author={Xiaotie Deng, Biaoshuai Tao, Ying Wang},
journal={arXiv preprint arXiv:2410.08127},
year={2024},
archivePrefix={arXiv},
eprint={2410.08127},
primaryClass={cs.GT}
} | deng2024aggregation |
arxiv-668211 | 2410.08129 | Efficient Perspective-Correct 3D Gaussian Splatting Using Hybrid Transparency | <|reference_start|>Efficient Perspective-Correct 3D Gaussian Splatting Using Hybrid Transparency: 3D Gaussian Splats (3DGS) have proven a versatile rendering primitive, both for inverse rendering as well as real-time exploration of scenes. In these applications, coherence across camera frames and multiple views is crucial, be it for robust convergence of a scene reconstruction or for artifact-free fly-throughs. Recent work started mitigating artifacts that break multi-view coherence, including popping artifacts due to inconsistent transparency sorting and perspective-correct outlines of (2D) splats. At the same time, real-time requirements forced such implementations to accept compromises in how transparency of large assemblies of 3D Gaussians is resolved, in turn breaking coherence in other ways. In our work, we aim at achieving maximum coherence, by rendering fully perspective-correct 3D Gaussians while using a high-quality approximation of accurate blending, hybrid transparency, on a per-pixel level, in order to retain real-time frame rates. Our fast and perspectively accurate approach for evaluation of 3D Gaussians does not require matrix inversions, thereby ensuring numerical stability and eliminating the need for special handling of degenerate splats, and the hybrid transparency formulation for blending maintains similar quality as fully resolved per-pixel transparencies at a fraction of the rendering costs. We further show that each of these two components can be independently integrated into Gaussian splatting systems. In combination, they achieve up to 2$\times$ higher frame rates, 2$\times$ faster optimization, and equal or better image quality with fewer rendering artifacts compared to traditional 3DGS on common benchmarks.<|reference_end|> | arxiv | @article{hahlbohm2024efficient,
title={Efficient Perspective-Correct 3D Gaussian Splatting Using Hybrid
Transparency},
author={Florian Hahlbohm, Fabian Friederichs, Tim Weyrich, Linus Franke,
Moritz Kappel, Susana Castillo, Marc Stamminger, Martin Eisemann, Marcus
Magnor},
journal={arXiv preprint arXiv:2410.08129},
year={2024},
archivePrefix={arXiv},
eprint={2410.08129},
primaryClass={cs.GR cs.CV}
} | hahlbohm2024efficient |
arxiv-668212 | 2410.08130 | Think Beyond Size: Dynamic Prompting for More Effective Reasoning | <|reference_start|>Think Beyond Size: Dynamic Prompting for More Effective Reasoning: This paper presents Dynamic Prompting, a novel framework aimed at improving the reasoning capabilities of Large Language Models (LLMs). In contrast to conventional static prompting methods, Dynamic Prompting enables the adaptive modification of prompt sequences and step counts based on real-time task complexity and model performance. This dynamic adaptation facilitates more efficient problem-solving, particularly in smaller models, by reducing hallucinations and repetitive cycles. Our empirical evaluations demonstrate that Dynamic Prompting allows smaller LLMs to perform competitively with much larger models, thereby challenging the conventional emphasis on model size as the primary determinant of reasoning efficacy.<|reference_end|> | arxiv | @article{r2024think,
title={Think Beyond Size: Dynamic Prompting for More Effective Reasoning},
author={Kamesh R},
journal={arXiv preprint arXiv:2410.08130},
year={2024},
archivePrefix={arXiv},
eprint={2410.08130},
primaryClass={cs.LG cs.CL}
} | r2024think |
arxiv-668213 | 2410.08131 | Deconstructing equivariant representations in molecular systems | <|reference_start|>Deconstructing equivariant representations in molecular systems: Recent equivariant models have shown significant progress in not just chemical property prediction, but as surrogates for dynamical simulations of molecules and materials. Many of the top performing models in this category are built within the framework of tensor products, which preserves equivariance by restricting interactions and transformations to those that are allowed by symmetry selection rules. Despite being a core part of the modeling process, there has not yet been much attention into understanding what information persists in these equivariant representations, and their general behavior outside of benchmark metrics. In this work, we report on a set of experiments using a simple equivariant graph convolution model on the QM9 dataset, focusing on correlating quantitative performance with the resulting molecular graph embeddings. Our key finding is that, for a scalar prediction task, many of the irreducible representations are simply ignored during training -- specifically those pertaining to vector ($l=1$) and tensor quantities ($l=2$) -- an issue that does not necessarily make itself evident in the test metric. We empirically show that removing some unused orders of spherical harmonics improves model performance, correlating with improved latent space structure. We provide a number of recommendations for future experiments to try and improve efficiency and utilization of equivariant features based on these observations.<|reference_end|> | arxiv | @article{lee2024deconstructing,
title={Deconstructing equivariant representations in molecular systems},
author={Kin Long Kelvin Lee, Mikhail Galkin, Santiago Miret},
journal={arXiv preprint arXiv:2410.08131},
year={2024},
archivePrefix={arXiv},
eprint={2410.08131},
primaryClass={cond-mat.mtrl-sci cs.LG physics.chem-ph}
} | lee2024deconstructing |
arxiv-668214 | 2410.08133 | Assessing Episodic Memory in LLMs with Sequence Order Recall Tasks | <|reference_start|>Assessing Episodic Memory in LLMs with Sequence Order Recall Tasks: Current LLM benchmarks focus on evaluating models' memory of facts and semantic relations, primarily assessing semantic aspects of long-term memory. However, in humans, long-term memory also includes episodic memory, which links memories to their contexts, such as the time and place they occurred. The ability to contextualize memories is crucial for many cognitive tasks and everyday functions. This form of memory has not been evaluated in LLMs with existing benchmarks. To address the gap in evaluating memory in LLMs, we introduce Sequence Order Recall Tasks (SORT), which we adapt from tasks used to study episodic memory in cognitive psychology. SORT requires LLMs to recall the correct order of text segments, and provides a general framework that is both easily extendable and does not require any additional annotations. We present an initial evaluation dataset, Book-SORT, comprising 36k pairs of segments extracted from 9 books recently added to the public domain. Based on a human experiment with 155 participants, we show that humans can recall sequence order based on long-term memory of a book. We find that models can perform the task with high accuracy when relevant text is given in-context during the SORT evaluation. However, when presented with the book text only during training, LLMs' performance on SORT falls short. By allowing to evaluate more aspects of memory, we believe that SORT will aid in the emerging development of memory-augmented models.<|reference_end|> | arxiv | @article{pink2024assessing,
title={Assessing Episodic Memory in LLMs with Sequence Order Recall Tasks},
author={Mathis Pink, Vy A. Vo, Qinyuan Wu, Jianing Mu, Javier S. Turek, Uri
Hasson, Kenneth A. Norman, Sebastian Michelmann, Alexander Huth, Mariya
Toneva},
journal={arXiv preprint arXiv:2410.08133},
year={2024},
archivePrefix={arXiv},
eprint={2410.08133},
primaryClass={cs.CL cs.AI cs.LG}
} | pink2024assessing |
arxiv-668215 | 2410.08134 | Steering Masked Discrete Diffusion Models via Discrete Denoising Posterior Prediction | <|reference_start|>Steering Masked Discrete Diffusion Models via Discrete Denoising Posterior Prediction: Generative modeling of discrete data underlies important applications spanning text-based agents like ChatGPT to the design of the very building blocks of life in protein sequences. However, application domains need to exert control over the generated data by steering the generative process - typically via RLHF - to satisfy a specified property, reward, or affinity metric. In this paper, we study the problem of steering Masked Diffusion Models (MDMs), a recent class of discrete diffusion models that offer a compelling alternative to traditional autoregressive models. We introduce Discrete Denoising Posterior Prediction (DDPP), a novel framework that casts the task of steering pre-trained MDMs as a problem of probabilistic inference by learning to sample from a target Bayesian posterior. Our DDPP framework leads to a family of three novel objectives that are all simulation-free, and thus scalable while applying to general non-differentiable reward functions. Empirically, we instantiate DDPP by steering MDMs to perform class-conditional pixel-level image modeling, RLHF-based alignment of MDMs using text-based rewards, and finetuning protein language models to generate more diverse secondary structures and shorter proteins. We substantiate our designs via wet-lab validation, where we observe transient expression of reward-optimized protein sequences.<|reference_end|> | arxiv | @article{rector-brooks2024steering,
title={Steering Masked Discrete Diffusion Models via Discrete Denoising
Posterior Prediction},
author={Jarrid Rector-Brooks, Mohsin Hasan, Zhangzhi Peng, Zachary Quinn,
Chenghao Liu, Sarthak Mittal, Nouha Dziri, Michael Bronstein, Yoshua Bengio,
Pranam Chatterjee, Alexander Tong, Avishek Joey Bose},
journal={arXiv preprint arXiv:2410.08134},
year={2024},
archivePrefix={arXiv},
eprint={2410.08134},
primaryClass={cs.LG cs.AI}
} | rector-brooks2024steering |
arxiv-668216 | 2410.08135 | State Feedback System Level Synthesis in Continuous Time | <|reference_start|>State Feedback System Level Synthesis in Continuous Time: System level synthesis (SLS) is a controller parameterization technique that facilitates distributed structured control via convex techniques. Results on SLS are primarily in the discrete-time setting; this paper extends SLS to the continuous-time setting. We translate the parametrization and associated constraints to continuous time, and propose a controller design procedure consisting of two steps: (1) pole selection and (2) optimization over closed-loops. We provide SLS reformulations of H2 and Hinf control, and show that the proposed procedure allows for convex design of structured H2 and Hinf controllers. We verify our methods in simulation on a grid of linearized swing equations. The resulting structured (i.e. sparse) controllers perform similarly (in some cases within 1\% cost) as the centralized (i.e. dense) controllers. The proposed procedure preserves the scalability and disturbance-rejection features of the original discrete-time SLS framework.<|reference_end|> | arxiv | @article{du2024state,
title={State Feedback System Level Synthesis in Continuous Time},
author={Yaozhi Du, Jing Shuang Li},
journal={arXiv preprint arXiv:2410.08135},
year={2024},
archivePrefix={arXiv},
eprint={2410.08135},
primaryClass={eess.SY cs.SY}
} | du2024state |
arxiv-668217 | 2410.08136 | SoundScape: A Human-AI Co-Creation System Making Your Memories Heard | <|reference_start|>SoundScape: A Human-AI Co-Creation System Making Your Memories Heard: Sound plays a significant role in human memory, yet it is often overlooked by mainstream life-recording methods. Most current UGC (User-Generated Content) creation tools emphasize visual content while lacking user-friendly sound design features. This paper introduces SoundScape, a human-AI co-creation system that allows users to easily create sound memories on mobile devices through innovative interaction. By integrating sound effects and music with visual scenes, SoundScape encourages users to enrich their creations with immersive sound elements, enhancing the atmosphere of their works. To support public creation, SoundScape incorporates a conversational agent and AI music generation technology. User studies indicate that our approach is effective for sound memory creation, with SoundScape outperforming existing tools in terms of user experience and the perceived quality of produced works.<|reference_end|> | arxiv | @article{zhong2024soundscape:,
title={SoundScape: A Human-AI Co-Creation System Making Your Memories Heard},
author={Chongjun Zhong, Jiaxing Yu, Yingping Cao, Songruoyao Wu, Wenqi Wu, and
Kejun Zhang},
journal={arXiv preprint arXiv:2410.08136},
year={2024},
archivePrefix={arXiv},
eprint={2410.08136},
primaryClass={cs.HC}
} | zhong2024soundscape: |
arxiv-668218 | 2410.08137 | Generalized Fixed-Depth Prefix and Postfix Symbolic Regression Grammars | <|reference_start|>Generalized Fixed-Depth Prefix and Postfix Symbolic Regression Grammars: We develop faultless, fixed-depth, string-based, prefix and postfix symbolic regression grammars, capable of producing \emph{any} expression from a set of operands, unary operators and/or binary operators. Using these grammars, we outline simplified forms of 5 popular heuristic search strategies: Brute Force Search, Monte Carlo Tree Search, Particle Swarm Optimization, Genetic Programming, and Simulated Annealing. For each algorithm, we compare the relative performance of prefix vs postfix for ten ground-truth expressions implemented entirely within a common C++/Eigen framework. Our experiments show a comparatively strong correlation between the average number of nodes per layer of the ground truth expression tree and the relative performance of prefix vs postfix. The fixed-depth grammars developed herein can enhance scientific discovery by increasing the efficiency of symbolic regression, enabling faster identification of accurate mathematical models across various disciplines.<|reference_end|> | arxiv | @article{finkelstein2024generalized,
title={Generalized Fixed-Depth Prefix and Postfix Symbolic Regression Grammars},
author={Edward Finkelstein},
journal={arXiv preprint arXiv:2410.08137},
year={2024},
archivePrefix={arXiv},
eprint={2410.08137},
primaryClass={cs.SC}
} | finkelstein2024generalized |
arxiv-668219 | 2410.08140 | Optimality of meta-converse for channel simulation | <|reference_start|>Optimality of meta-converse for channel simulation: We study the effect of shared non-signaling correlations for the problem of simulating a channel using noiseless communication in the one-shot setting. For classical channels, we show how to round any non-signaling-assisted simulation strategy--which corresponds to the natural linear programming meta-converse for channel simulation--to a strategy that only uses shared randomness. For quantum channels, we round any non-signaling-assisted simulation strategy to a strategy that only uses shared entanglement. Our main result is for classical and classical-quantum channels, for which we employ ideas from approximation algorithms to give a guarantee on the ratio of success probabilities of at least $(1-\mathrm{e}^{-1})$. We further show this ratio to be optimal for the purely classical case. It can be improved to $(1-t^{-1})$ using $O(\ln \ln(t))$ additional bits of communication.<|reference_end|> | arxiv | @article{oufkir2024optimality,
title={Optimality of meta-converse for channel simulation},
author={Aadil Oufkir, Omar Fawzi, Mario Berta},
journal={arXiv preprint arXiv:2410.08140},
year={2024},
archivePrefix={arXiv},
eprint={2410.08140},
primaryClass={quant-ph cs.IT math.IT}
} | oufkir2024optimality |
arxiv-668220 | 2410.08142 | Improved Condensers for Chor-Goldreich Sources | <|reference_start|>Improved Condensers for Chor-Goldreich Sources: One of the earliest models of weak randomness is the Chor-Goldreich (CG) source. A $(t,n,k)$-CG source is a sequence of random variables $X=(X_1,\dots,X_t)\sim(\{0,1\}^n)^t$, where each $X_i$ has min-entropy $k$ conditioned on any fixing of $X_1,\dots,X_{i-1}$. Chor and Goldreich proved that there is no deterministic way to extract randomness from such a source. Nevertheless, Doron, Moshkovitz, Oh, and Zuckerman showed that there is a deterministic way to condense a CG source into a string with small entropy gap. They gave applications of such a condenser to simulating randomized algorithms with small error and to certain cryptographic tasks. They studied the case where the block length $n$ and entropy rate $k/n$ are both constant. We study the much more general setting where the block length can be arbitrarily large, and the entropy rate can be arbitrarily small. We construct the first explicit condenser for CG sources in this setting, and it can be instantiated in a number of different ways. When the entropy rate of the CG source is constant, our condenser requires just a constant number of blocks $t$ to produce an output with entropy rate $0.9$, say. In the low entropy regime, using $t=$ poly$(n)$ blocks, our condenser can achieve output entropy rate $0.9$ even if each block has just $1$ bit of min-entropy. Moreover, these condensers have exponentially small error. Finally, we provide strong existential and impossibility results. For our existential result, we show that a random function is a seedless condenser (with surprisingly strong parameters) for any small family of sources. As a corollary, we get new existential results for seeded condensers and condensers for CG sources. For our impossibility result, we show the latter result is nearly tight, by giving a simple proof that the output of any condenser for CG sources must inherit the entropy gap of (one block of) its input.<|reference_end|> | arxiv | @article{goodman2024improved,
title={Improved Condensers for Chor-Goldreich Sources},
author={Jesse Goodman, Xin Li, David Zuckerman},
journal={arXiv preprint arXiv:2410.08142},
year={2024},
archivePrefix={arXiv},
eprint={2410.08142},
primaryClass={cs.CC}
} | goodman2024improved |
arxiv-668221 | 2410.08143 | DelTA: An Online Document-Level Translation Agent Based on Multi-Level Memory | <|reference_start|>DelTA: An Online Document-Level Translation Agent Based on Multi-Level Memory: Large language models (LLMs) have achieved reasonable quality improvements in machine translation (MT). However, most current research on MT-LLMs still faces significant challenges in maintaining translation consistency and accuracy when processing entire documents. In this paper, we introduce DelTA, a Document-levEL Translation Agent designed to overcome these limitations. DelTA features a multi-level memory structure that stores information across various granularities and spans, including Proper Noun Records, Bilingual Summary, Long-Term Memory, and Short-Term Memory, which are continuously retrieved and updated by auxiliary LLM-based components. Experimental results indicate that DelTA significantly outperforms strong baselines in terms of translation consistency and quality across four open/closed-source LLMs and two representative document translation datasets, achieving an increase in consistency scores by up to 4.58 percentage points and in COMET scores by up to 3.16 points on average. DelTA employs a sentence-by-sentence translation strategy, ensuring no sentence omissions and offering a memory-efficient solution compared to the mainstream method. Furthermore, DelTA improves pronoun translation accuracy, and the summary component of the agent also shows promise as a tool for query-based summarization tasks. We release our code and data at https://github.com/YutongWang1216/DocMTAgent.<|reference_end|> | arxiv | @article{wang2024delta:,
title={DelTA: An Online Document-Level Translation Agent Based on Multi-Level
Memory},
author={Yutong Wang, Jiali Zeng, Xuebo Liu, Derek F. Wong, Fandong Meng, Jie
Zhou, Min Zhang},
journal={arXiv preprint arXiv:2410.08143},
year={2024},
archivePrefix={arXiv},
eprint={2410.08143},
primaryClass={cs.CL cs.AI}
} | wang2024delta: |
arxiv-668222 | 2410.08145 | Insight Over Sight? Exploring the Vision-Knowledge Conflicts in Multimodal LLMs | <|reference_start|>Insight Over Sight? Exploring the Vision-Knowledge Conflicts in Multimodal LLMs: This paper explores the problem of commonsense-level vision-knowledge conflict in Multimodal Large Language Models (MLLMs), where visual information contradicts model's internal commonsense knowledge (see Figure 1). To study this issue, we introduce an automated pipeline, augmented with human-in-the-loop quality control, to establish a benchmark aimed at simulating and assessing the conflicts in MLLMs. Utilizing this pipeline, we have crafted a diagnostic benchmark comprising 374 original images and 1,122 high-quality question-answer (QA) pairs. This benchmark covers two types of conflict target and three question difficulty levels, providing a thorough assessment tool. Through this benchmark, we evaluate the conflict-resolution capabilities of nine representative MLLMs across various model families and find a noticeable over-reliance on textual queries. Drawing on these findings, we propose a novel prompting strategy, "Focus-on-Vision" (FoV), which markedly enhances MLLMs' ability to favor visual data over conflicting textual knowledge. Our detailed analysis and the newly proposed strategy significantly advance the understanding and mitigating of vision-knowledge conflicts in MLLMs. The data and code are made publicly available.<|reference_end|> | arxiv | @article{liu2024insight,
title={Insight Over Sight? Exploring the Vision-Knowledge Conflicts in
Multimodal LLMs},
author={Xiaoyuan Liu, Wenxuan Wang, Youliang Yuan, Jen-tse Huang, Qiuzhi Liu,
Pinjia He, Zhaopeng Tu},
journal={arXiv preprint arXiv:2410.08145},
year={2024},
archivePrefix={arXiv},
eprint={2410.08145},
primaryClass={cs.CL cs.CV}
} | liu2024insight |
arxiv-668223 | 2410.08146 | Rewarding Progress: Scaling Automated Process Verifiers for LLM Reasoning | <|reference_start|>Rewarding Progress: Scaling Automated Process Verifiers for LLM Reasoning: A promising approach for improving reasoning in large language models is to use process reward models (PRMs). PRMs provide feedback at each step of a multi-step reasoning trace, potentially improving credit assignment over outcome reward models (ORMs) that only provide feedback at the final step. However, collecting dense, per-step human labels is not scalable, and training PRMs from automatically-labeled data has thus far led to limited gains. To improve a base policy by running search against a PRM or using it as dense rewards for reinforcement learning (RL), we ask: "How should we design process rewards?". Our key insight is that, to be effective, the process reward for a step should measure progress: a change in the likelihood of producing a correct response in the future, before and after taking the step, corresponding to the notion of step-level advantages in RL. Crucially, this progress should be measured under a prover policy distinct from the base policy. We theoretically characterize the set of good provers and our results show that optimizing process rewards from such provers improves exploration during test-time search and online RL. In fact, our characterization shows that weak prover policies can substantially improve a stronger base policy, which we also observe empirically. We validate our claims by training process advantage verifiers (PAVs) to predict progress under such provers, and show that compared to ORMs, test-time search against PAVs is $>8\%$ more accurate, and $1.5-5\times$ more compute-efficient. Online RL with dense rewards from PAVs enables one of the first results with $5-6\times$ gain in sample efficiency, and $>6\%$ gain in accuracy, over ORMs.<|reference_end|> | arxiv | @article{setlur2024rewarding,
title={Rewarding Progress: Scaling Automated Process Verifiers for LLM
Reasoning},
author={Amrith Setlur, Chirag Nagpal, Adam Fisch, Xinyang Geng, Jacob
Eisenstein, Rishabh Agarwal, Alekh Agarwal, Jonathan Berant, Aviral Kumar},
journal={arXiv preprint arXiv:2410.08146},
year={2024},
archivePrefix={arXiv},
eprint={2410.08146},
primaryClass={cs.LG cs.CL}
} | setlur2024rewarding |
arxiv-668224 | 2410.08147 | The Bouc-Wen Model for Binary Direct Collinear Collisions of Convex Viscoplastic Bodies | <|reference_start|>The Bouc-Wen Model for Binary Direct Collinear Collisions of Convex Viscoplastic Bodies: We study mathematical models of binary direct collinear collisions of convex viscoplastic bodies based on two incremental collision laws that employ the Bouc-Wen differential model of hysteresis to represent the elastoplastic behavior of the materials of the colliding bodies. These collision laws are the Bouc-Wen-Simon-Hunt-Crossley collision law (BWSHCCL) and the Bouc-Wen-Maxwell collision law (BWMCL). The BWSHCCL comprises of the Bouc-Wen model amended with the nonlinear Hertzian elastic spring element and connected in parallel to a nonlinear displacement-dependent and rate-dependent energy dissipation element. The BWMCL comprises of the Bouc-Wen model amended with the nonlinear Hertzian elastic spring element and connected in series to a linear rate-dependent energy dissipation element. The mathematical models of the collision process are presented in the form of finite-dimensional initial value problems. We show that the models possess favorable analytical properties (e.g., global existence, uniqueness and boundedness of the solutions) under suitable restrictions on the ranges of their parameters. Furthermore, we show that excellent agreement can be achieved between the experimental data and the data from the numerical simulation of the mathematical models across a wide range of initial relative velocities and material properties of the colliding bodies while using parameterizations that are independent of the initial relative velocity.<|reference_end|> | arxiv | @article{milehins2024the,
title={The Bouc-Wen Model for Binary Direct Collinear Collisions of Convex
Viscoplastic Bodies},
author={Mihails Milehins and Dan B. Marghitu},
journal={arXiv preprint arXiv:2410.08147},
year={2024},
archivePrefix={arXiv},
eprint={2410.08147},
primaryClass={physics.class-ph cs.SY eess.SY}
} | milehins2024the |
arxiv-668225 | 2410.08151 | Progressive Autoregressive Video Diffusion Models | <|reference_start|>Progressive Autoregressive Video Diffusion Models: Current frontier video diffusion models have demonstrated remarkable results at generating high-quality videos. However, they can only generate short video clips, normally around 10 seconds or 240 frames, due to computation limitations during training. In this work, we show that existing models can be naturally extended to autoregressive video diffusion models without changing the architectures. Our key idea is to assign the latent frames with progressively increasing noise levels rather than a single noise level, which allows for fine-grained condition among the latents and large overlaps between the attention windows. Such progressive video denoising allows our models to autoregressively generate video frames without quality degradation or abrupt scene changes. We present state-of-the-art results on long video generation at 1 minute (1440 frames at 24 FPS). Videos from this paper are available at https://desaixie.github.io/pa-vdm/.<|reference_end|> | arxiv | @article{xie2024progressive,
title={Progressive Autoregressive Video Diffusion Models},
author={Desai Xie, Zhan Xu, Yicong Hong, Hao Tan, Difan Liu, Feng Liu, Arie
Kaufman, Yang Zhou},
journal={arXiv preprint arXiv:2410.08151},
year={2024},
archivePrefix={arXiv},
eprint={2410.08151},
primaryClass={cs.CV cs.LG}
} | xie2024progressive |
arxiv-668226 | 2410.08152 | RayEmb: Arbitrary Landmark Detection in X-Ray Images Using Ray Embedding Subspace | <|reference_start|>RayEmb: Arbitrary Landmark Detection in X-Ray Images Using Ray Embedding Subspace: Intra-operative 2D-3D registration of X-ray images with pre-operatively acquired CT scans is a crucial procedure in orthopedic surgeries. Anatomical landmarks pre-annotated in the CT volume can be detected in X-ray images to establish 2D-3D correspondences, which are then utilized for registration. However, registration often fails in certain view angles due to poor landmark visibility. We propose a novel method to address this issue by detecting arbitrary landmark points in X-ray images. Our approach represents 3D points as distinct subspaces, formed by feature vectors (referred to as ray embeddings) corresponding to intersecting rays. Establishing 2D-3D correspondences then becomes a task of finding ray embeddings that are close to a given subspace, essentially performing an intersection test. Unlike conventional methods for landmark estimation, our approach eliminates the need for manually annotating fixed landmarks. We trained our model using the synthetic images generated from CTPelvic1K CLINIC dataset, which contains 103 CT volumes, and evaluated it on the DeepFluoro dataset, comprising real X-ray images. Experimental results demonstrate the superiority of our method over conventional methods. The code is available at https://github.com/Pragyanstha/rayemb.<|reference_end|> | arxiv | @article{shrestha2024rayemb:,
title={RayEmb: Arbitrary Landmark Detection in X-Ray Images Using Ray Embedding
Subspace},
author={Pragyan Shrestha, Chun Xie, Yuichi Yoshii, Itaru Kitahara},
journal={arXiv preprint arXiv:2410.08152},
year={2024},
archivePrefix={arXiv},
eprint={2410.08152},
primaryClass={cs.CV}
} | shrestha2024rayemb: |
arxiv-668227 | 2410.08159 | DART: Denoising Autoregressive Transformer for Scalable Text-to-Image Generation | <|reference_start|>DART: Denoising Autoregressive Transformer for Scalable Text-to-Image Generation: Diffusion models have become the dominant approach for visual generation. They are trained by denoising a Markovian process that gradually adds noise to the input. We argue that the Markovian property limits the models ability to fully utilize the generation trajectory, leading to inefficiencies during training and inference. In this paper, we propose DART, a transformer-based model that unifies autoregressive (AR) and diffusion within a non-Markovian framework. DART iteratively denoises image patches spatially and spectrally using an AR model with the same architecture as standard language models. DART does not rely on image quantization, enabling more effective image modeling while maintaining flexibility. Furthermore, DART seamlessly trains with both text and image data in a unified model. Our approach demonstrates competitive performance on class-conditioned and text-to-image generation tasks, offering a scalable, efficient alternative to traditional diffusion models. Through this unified framework, DART sets a new benchmark for scalable, high-quality image synthesis.<|reference_end|> | arxiv | @article{gu2024dart:,
title={DART: Denoising Autoregressive Transformer for Scalable Text-to-Image
Generation},
author={Jiatao Gu, Yuyang Wang, Yizhe Zhang, Qihang Zhang, Dinghuai Zhang,
Navdeep Jaitly, Josh Susskind, Shuangfei Zhai},
journal={arXiv preprint arXiv:2410.08159},
year={2024},
archivePrefix={arXiv},
eprint={2410.08159},
primaryClass={cs.CV cs.LG}
} | gu2024dart: |
arxiv-668228 | 2410.08160 | Optimal Strategies for Winning Certain Coset-Guessing Quantum Games | <|reference_start|>Optimal Strategies for Winning Certain Coset-Guessing Quantum Games: In a recently introduced coset guessing game, Alice plays against Bob and Charlie, aiming to meet a joint winning condition. Bob and Charlie can only communicate before the game starts to devise a joint strategy. The game we consider begins with Alice preparing a 2m-qubit quantum state based on a random selection of three parameters. She sends the first m qubits to Bob and the rest to Charlie and then reveals to them her choice for one of the parameters. Bob is supposed to guess one of the hidden parameters, Charlie the other, and they win if both guesses are correct. From previous work, we know that the probability of Bob's and Charlie's guesses being simultaneously correct goes to zero exponentially as m increases. We derive a tight upper bound on this probability and show how Bob and Charlie can achieve it. While developing the optimal strategy, we devised an encoding circuit using only CNOT and Hadamard gates, which could be relevant for building efficient CSS-coded systems. We found that the role of quantum information that Alice communicates to Bob and Charlie is to make their responses correlated rather than improve their individual (marginal) correct guessing rates.<|reference_end|> | arxiv | @article{schleppy2024optimal,
title={Optimal Strategies for Winning Certain Coset-Guessing Quantum Games},
author={Michael Schleppy, Emina Soljanin, Nicolas Swanson},
journal={arXiv preprint arXiv:2410.08160},
year={2024},
archivePrefix={arXiv},
eprint={2410.08160},
primaryClass={quant-ph cs.IT math.IT}
} | schleppy2024optimal |
arxiv-668229 | 2410.08162 | The Effect of Surprisal on Reading Times in Information Seeking and Repeated Reading | <|reference_start|>The Effect of Surprisal on Reading Times in Information Seeking and Repeated Reading: The effect of surprisal on processing difficulty has been a central topic of investigation in psycholinguistics. Here, we use eyetracking data to examine three language processing regimes that are common in daily life but have not been addressed with respect to this question: information seeking, repeated processing, and the combination of the two. Using standard regime-agnostic surprisal estimates we find that the prediction of surprisal theory regarding the presence of a linear effect of surprisal on processing times, extends to these regimes. However, when using surprisal estimates from regime-specific contexts that match the contexts and tasks given to humans, we find that in information seeking, such estimates do not improve the predictive power of processing times compared to standard surprisals. Further, regime-specific contexts yield near zero surprisal estimates with no predictive power for processing times in repeated reading. These findings point to misalignments of task and memory representations between humans and current language models, and question the extent to which such models can be used for estimating cognitively relevant quantities. We further discuss theoretical challenges posed by these results.<|reference_end|> | arxiv | @article{klein2024the,
title={The Effect of Surprisal on Reading Times in Information Seeking and
Repeated Reading},
author={Keren Gruteke Klein, Yoav Meiri, Omer Shubi, Yevgeni Berzak},
journal={arXiv preprint arXiv:2410.08162},
year={2024},
archivePrefix={arXiv},
eprint={2410.08162},
primaryClass={cs.CL}
} | klein2024the |
arxiv-668230 | 2410.08164 | Agent S: An Open Agentic Framework that Uses Computers Like a Human | <|reference_start|>Agent S: An Open Agentic Framework that Uses Computers Like a Human: We present Agent S, an open agentic framework that enables autonomous interaction with computers through a Graphical User Interface (GUI), aimed at transforming human-computer interaction by automating complex, multi-step tasks. Agent S aims to address three key challenges in automating computer tasks: acquiring domain-specific knowledge, planning over long task horizons, and handling dynamic, non-uniform interfaces. To this end, Agent S introduces experience-augmented hierarchical planning, which learns from external knowledge search and internal experience retrieval at multiple levels, facilitating efficient task planning and subtask execution. In addition, it employs an Agent-Computer Interface (ACI) to better elicit the reasoning and control capabilities of GUI agents based on Multimodal Large Language Models (MLLMs). Evaluation on the OSWorld benchmark shows that Agent S outperforms the baseline by 9.37% on success rate (an 83.6% relative improvement) and achieves a new state-of-the-art. Comprehensive analysis highlights the effectiveness of individual components and provides insights for future improvements. Furthermore, Agent S demonstrates broad generalizability to different operating systems on a newly-released WindowsAgentArena benchmark. Code available at https://github.com/simular-ai/Agent-S.<|reference_end|> | arxiv | @article{agashe2024agent,
title={Agent S: An Open Agentic Framework that Uses Computers Like a Human},
author={Saaket Agashe, Jiuzhou Han, Shuyu Gan, Jiachen Yang, Ang Li, Xin Eric
Wang},
journal={arXiv preprint arXiv:2410.08164},
year={2024},
archivePrefix={arXiv},
eprint={2410.08164},
primaryClass={cs.AI cs.CL cs.CV}
} | agashe2024agent |
arxiv-668231 | 2410.08165 | Visual Scratchpads: Enabling Global Reasoning in Vision | <|reference_start|>Visual Scratchpads: Enabling Global Reasoning in Vision: Modern vision models have achieved remarkable success in benchmarks where local features provide critical information about the target. There is now a growing interest in solving tasks that require more global reasoning, where local features offer no significant information. These tasks are reminiscent of the connectivity tasks discussed by Minsky and Papert in 1969, which exposed the limitations of the perceptron model and contributed to the first AI winter. In this paper, we revisit such tasks by introducing four global visual benchmarks involving path findings and mazes. We show that: (1) although today's large vision models largely surpass the expressivity limitations of the early models, they still struggle with the learning efficiency; we put forward the "globality degree" notion to understand this limitation; (2) we then demonstrate that the picture changes and global reasoning becomes feasible with the introduction of "visual scratchpads"; similarly to the text scratchpads and chain-of-thoughts used in language models, visual scratchpads help break down global tasks into simpler ones; (3) we finally show that some scratchpads are better than others, in particular, "inductive scratchpads" that take steps relying on less information afford better out-of-distribution generalization and succeed for smaller model sizes.<|reference_end|> | arxiv | @article{lotfi2024visual,
title={Visual Scratchpads: Enabling Global Reasoning in Vision},
author={Aryo Lotfi, Enrico Fini, Samy Bengio, Moin Nabi, Emmanuel Abbe},
journal={arXiv preprint arXiv:2410.08165},
year={2024},
archivePrefix={arXiv},
eprint={2410.08165},
primaryClass={cs.LG cs.CV}
} | lotfi2024visual |
arxiv-668232 | 2410.08168 | ZeroComp: Zero-shot Object Compositing from Image Intrinsics via Diffusion | <|reference_start|>ZeroComp: Zero-shot Object Compositing from Image Intrinsics via Diffusion: We present ZeroComp, an effective zero-shot 3D object compositing approach that does not require paired composite-scene images during training. Our method leverages ControlNet to condition from intrinsic images and combines it with a Stable Diffusion model to utilize its scene priors, together operating as an effective rendering engine. During training, ZeroComp uses intrinsic images based on geometry, albedo, and masked shading, all without the need for paired images of scenes with and without composite objects. Once trained, it seamlessly integrates virtual 3D objects into scenes, adjusting shading to create realistic composites. We developed a high-quality evaluation dataset and demonstrate that ZeroComp outperforms methods using explicit lighting estimations and generative techniques in quantitative and human perception benchmarks. Additionally, ZeroComp extends to real and outdoor image compositing, even when trained solely on synthetic indoor data, showcasing its effectiveness in image compositing.<|reference_end|> | arxiv | @article{zhang2024zerocomp:,
title={ZeroComp: Zero-shot Object Compositing from Image Intrinsics via
Diffusion},
author={Zitian Zhang, Fr\'ed\'eric Fortier-Chouinard, Mathieu Garon, Anand
Bhattad, Jean-Fran\c{c}ois Lalonde},
journal={arXiv preprint arXiv:2410.08168},
year={2024},
archivePrefix={arXiv},
eprint={2410.08168},
primaryClass={cs.CV}
} | zhang2024zerocomp: |
arxiv-668233 | 2410.08170 | Simple Length-Constrained Minimum Spanning Trees | <|reference_start|>Simple Length-Constrained Minimum Spanning Trees: In the length-constrained minimum spanning tree (MST) problem, we are given an $n$-node edge-weighted graph $G$ and a length constraint $h \geq 1$. Our goal is to find a spanning tree of $G$ whose diameter is at most $h$ with minimum weight. Prior work of Marathe et al.\ gave a poly-time algorithm which repeatedly computes maximum cardinality matchings of minimum weight to output a spanning tree whose weight is $O(\log n)$-approximate with diameter $O(\log n)\cdot h$. In this work, we show that a simple random sampling approach recovers the results of Marathe et al. -- no computation of min-weight max-matchings needed! Furthermore, the simplicity of our approach allows us to tradeoff between the approximation factor and the loss in diameter: we show that for any $\epsilon \geq 1/\operatorname{poly}(n)$, one can output a spanning tree whose weight is $O(n^\epsilon / \epsilon)$-approximate with diameter $O(1/\epsilon)\cdot h$ with high probability in poly-time. This immediately gives the first poly-time $\operatorname{poly}(\log n)$-approximation for length-constrained MST whose loss in diameter is $o(\log n)$.<|reference_end|> | arxiv | @article{hershkowitz2024simple,
title={Simple Length-Constrained Minimum Spanning Trees},
author={D Ellis Hershkowitz and Richard Z Huang},
journal={arXiv preprint arXiv:2410.08170},
year={2024},
archivePrefix={arXiv},
eprint={2410.08170},
primaryClass={cs.DS}
} | hershkowitz2024simple |
arxiv-668234 | 2410.08172 | On the Evaluation of Generative Robotic Simulations | <|reference_start|>On the Evaluation of Generative Robotic Simulations: Due to the difficulty of acquiring extensive real-world data, robot simulation has become crucial for parallel training and sim-to-real transfer, highlighting the importance of scalable simulated robotic tasks. Foundation models have demonstrated impressive capacities in autonomously generating feasible robotic tasks. However, this new paradigm underscores the challenge of adequately evaluating these autonomously generated tasks. To address this, we propose a comprehensive evaluation framework tailored to generative simulations. Our framework segments evaluation into three core aspects: quality, diversity, and generalization. For single-task quality, we evaluate the realism of the generated task and the completeness of the generated trajectories using large language models and vision-language models. In terms of diversity, we measure both task and data diversity through text similarity of task descriptions and world model loss trained on collected task trajectories. For task-level generalization, we assess the zero-shot generalization ability on unseen tasks of a policy trained with multiple generated tasks. Experiments conducted on three representative task generation pipelines demonstrate that the results from our framework are highly consistent with human evaluations, confirming the feasibility and validity of our approach. The findings reveal that while metrics of quality and diversity can be achieved through certain methods, no single approach excels across all metrics, suggesting a need for greater focus on balancing these different metrics. Additionally, our analysis further highlights the common challenge of low generalization capability faced by current works. Our anonymous website: https://sites.google.com/view/evaltasks.<|reference_end|> | arxiv | @article{chen2024on,
title={On the Evaluation of Generative Robotic Simulations},
author={Feng Chen, Botian Xu, Pu Hua, Peiqi Duan, Yanchao Yang, Yi Ma, Huazhe
Xu},
journal={arXiv preprint arXiv:2410.08172},
year={2024},
archivePrefix={arXiv},
eprint={2410.08172},
primaryClass={cs.RO cs.AI cs.CV cs.LG}
} | chen2024on |
arxiv-668235 | 2410.08174 | Sample then Identify: A General Framework for Risk Control and Assessment in Multimodal Large Language Models | <|reference_start|>Sample then Identify: A General Framework for Risk Control and Assessment in Multimodal Large Language Models: Multimodal Large Language Models (MLLMs) exhibit promising advancements across various tasks, yet they still encounter significant trustworthiness issues. Prior studies apply Split Conformal Prediction (SCP) in language modeling to construct prediction sets with statistical guarantees. However, these methods typically rely on internal model logits or are restricted to multiple-choice settings, which hampers their generalizability and adaptability in dynamic, open-ended environments. In this paper, we introduce TRON, a two-step framework for risk control and assessment, applicable to any MLLM that supports sampling in both open-ended and closed-ended scenarios. TRON comprises two main components: (1) a novel conformal score to sample response sets of minimum size, and (2) a nonconformity score to identify high-quality responses based on self-consistency theory, controlling the error rates by two specific risk levels. Furthermore, we investigate semantic redundancy in prediction sets within open-ended contexts for the first time, leading to a promising evaluation metric for MLLMs based on average set size. Our comprehensive experiments across four Video Question-Answering (VideoQA) datasets utilizing eight MLLMs show that TRON achieves desired error rates bounded by two user-specified risk levels. Additionally, deduplicated prediction sets maintain adaptiveness while being more efficient and stable for risk assessment under different risk levels.<|reference_end|> | arxiv | @article{wang2024sample,
title={Sample then Identify: A General Framework for Risk Control and
Assessment in Multimodal Large Language Models},
author={Qingni Wang, Tiantian Geng, Zhiyuan Wang, Teng Wang, Bo Fu, Feng Zheng},
journal={arXiv preprint arXiv:2410.08174},
year={2024},
archivePrefix={arXiv},
eprint={2410.08174},
primaryClass={cs.CL cs.AI cs.LG cs.MM}
} | wang2024sample |
arxiv-668236 | 2410.08177 | TANet: Triplet Attention Network for All-In-One Adverse Weather Image Restoration | <|reference_start|>TANet: Triplet Attention Network for All-In-One Adverse Weather Image Restoration: Adverse weather image restoration aims to remove unwanted degraded artifacts, such as haze, rain, and snow, caused by adverse weather conditions. Existing methods achieve remarkable results for addressing single-weather conditions. However, they face challenges when encountering unpredictable weather conditions, which often happen in real-world scenarios. Although different weather conditions exhibit different degradation patterns, they share common characteristics that are highly related and complementary, such as occlusions caused by degradation patterns, color distortion, and contrast attenuation due to the scattering of atmospheric particles. Therefore, we focus on leveraging common knowledge across multiple weather conditions to restore images in a unified manner. In this paper, we propose a Triplet Attention Network (TANet) to efficiently and effectively address all-in-one adverse weather image restoration. TANet consists of Triplet Attention Block (TAB) that incorporates three types of attention mechanisms: Local Pixel-wise Attention (LPA) and Global Strip-wise Attention (GSA) to address occlusions caused by non-uniform degradation patterns, and Global Distribution Attention (GDA) to address color distortion and contrast attenuation caused by atmospheric phenomena. By leveraging common knowledge shared across different weather conditions, TANet successfully addresses multiple weather conditions in a unified manner. Experimental results show that TANet efficiently and effectively achieves state-of-the-art performance in all-in-one adverse weather image restoration. The source code is available at https://github.com/xhuachris/TANet-ACCV-2024.<|reference_end|> | arxiv | @article{wang2024tanet:,
title={TANet: Triplet Attention Network for All-In-One Adverse Weather Image
Restoration},
author={Hsing-Hua Wang, Fu-Jen Tsai, Yen-Yu Lin, and Chia-Wen Lin},
journal={arXiv preprint arXiv:2410.08177},
year={2024},
archivePrefix={arXiv},
eprint={2410.08177},
primaryClass={cs.CV}
} | wang2024tanet: |
arxiv-668237 | 2410.08181 | RGM: Reconstructing High-fidelity 3D Car Assets with Relightable 3D-GS Generative Model from a Single Image | <|reference_start|>RGM: Reconstructing High-fidelity 3D Car Assets with Relightable 3D-GS Generative Model from a Single Image: The generation of high-quality 3D car assets is essential for various applications, including video games, autonomous driving, and virtual reality. Current 3D generation methods utilizing NeRF or 3D-GS as representations for 3D objects, generate a Lambertian object under fixed lighting and lack separated modelings for material and global illumination. As a result, the generated assets are unsuitable for relighting under varying lighting conditions, limiting their applicability in downstream tasks. To address this challenge, we propose a novel relightable 3D object generative framework that automates the creation of 3D car assets, enabling the swift and accurate reconstruction of a vehicle's geometry, texture, and material properties from a single input image. Our approach begins with introducing a large-scale synthetic car dataset comprising over 1,000 high-precision 3D vehicle models. We represent 3D objects using global illumination and relightable 3D Gaussian primitives integrating with BRDF parameters. Building on this representation, we introduce a feed-forward model that takes images as input and outputs both relightable 3D Gaussians and global illumination parameters. Experimental results demonstrate that our method produces photorealistic 3D car assets that can be seamlessly integrated into road scenes with different illuminations, which offers substantial practical benefits for industrial applications.<|reference_end|> | arxiv | @article{chen2024rgm:,
title={RGM: Reconstructing High-fidelity 3D Car Assets with Relightable 3D-GS
Generative Model from a Single Image},
author={Xiaoxue Chen, Jv Zheng, Hao Huang, Haoran Xu, Weihao Gu, Kangliang
Chen, He Xiang, Huan-ang Gao, Hao Zhao, Guyue Zhou, Yaqin Zhang},
journal={arXiv preprint arXiv:2410.08181},
year={2024},
archivePrefix={arXiv},
eprint={2410.08181},
primaryClass={cs.CV}
} | chen2024rgm: |
arxiv-668238 | 2410.08182 | MRAG-Bench: Vision-Centric Evaluation for Retrieval-Augmented Multimodal Models | <|reference_start|>MRAG-Bench: Vision-Centric Evaluation for Retrieval-Augmented Multimodal Models: Existing multimodal retrieval benchmarks primarily focus on evaluating whether models can retrieve and utilize external textual knowledge for question answering. However, there are scenarios where retrieving visual information is either more beneficial or easier to access than textual data. In this paper, we introduce a multimodal retrieval-augmented generation benchmark, MRAG-Bench, in which we systematically identify and categorize scenarios where visually augmented knowledge is better than textual knowledge, for instance, more images from varying viewpoints. MRAG-Bench consists of 16,130 images and 1,353 human-annotated multiple-choice questions across 9 distinct scenarios. With MRAG-Bench, we conduct an evaluation of 10 open-source and 4 proprietary large vision-language models (LVLMs). Our results show that all LVLMs exhibit greater improvements when augmented with images compared to textual knowledge, confirming that MRAG-Bench is vision-centric. Additionally, we conduct extensive analysis with MRAG-Bench, which offers valuable insights into retrieval-augmented LVLMs. Notably, the top-performing model, GPT-4o, faces challenges in effectively leveraging retrieved knowledge, achieving only a 5.82% improvement with ground-truth information, in contrast to a 33.16% improvement observed in human participants. These findings highlight the importance of MRAG-Bench in encouraging the community to enhance LVLMs' ability to utilize retrieved visual knowledge more effectively.<|reference_end|> | arxiv | @article{hu2024mrag-bench:,
title={MRAG-Bench: Vision-Centric Evaluation for Retrieval-Augmented Multimodal
Models},
author={Wenbo Hu, Jia-Chen Gu, Zi-Yi Dou, Mohsen Fayyaz, Pan Lu, Kai-Wei
Chang, Nanyun Peng},
journal={arXiv preprint arXiv:2410.08182},
year={2024},
archivePrefix={arXiv},
eprint={2410.08182},
primaryClass={cs.CV cs.AI cs.CL}
} | hu2024mrag-bench: |
arxiv-668239 | 2410.08184 | Scaling Laws For Diffusion Transformers | <|reference_start|>Scaling Laws For Diffusion Transformers: Diffusion transformers (DiT) have already achieved appealing synthesis and scaling properties in content recreation, e.g., image and video generation. However, scaling laws of DiT are less explored, which usually offer precise predictions regarding optimal model size and data requirements given a specific compute budget. Therefore, experiments across a broad range of compute budgets, from 1e17 to 6e18 FLOPs are conducted to confirm the existence of scaling laws in DiT for the first time. Concretely, the loss of pretraining DiT also follows a power-law relationship with the involved compute. Based on the scaling law, we can not only determine the optimal model size and required data but also accurately predict the text-to-image generation loss given a model with 1B parameters and a compute budget of 1e21 FLOPs. Additionally, we also demonstrate that the trend of pre-training loss matches the generation performances (e.g., FID), even across various datasets, which complements the mapping from compute to synthesis quality and thus provides a predictable benchmark that assesses model performance and data quality at a reduced cost.<|reference_end|> | arxiv | @article{liang2024scaling,
title={Scaling Laws For Diffusion Transformers},
author={Zhengyang Liang, Hao He, Ceyuan Yang, Bo Dai},
journal={arXiv preprint arXiv:2410.08184},
year={2024},
archivePrefix={arXiv},
eprint={2410.08184},
primaryClass={cs.CV}
} | liang2024scaling |
arxiv-668240 | 2410.08186 | Probabilistically Input-to-State Stable Stochastic Model Predictive Control | <|reference_start|>Probabilistically Input-to-State Stable Stochastic Model Predictive Control: Employing model predictive control to systems with unbounded, stochastic disturbances poses the challenge of guaranteeing safety, i.e., repeated feasibility and stability of the closed-loop system. Especially, there are no strict repeated feasibility guarantees for standard stochastic MPC formulations. Thus, traditional stability proofs are not straightforwardly applicable. We exploit the concept of input-to-state stability in probability and outline how it can be used to provide stability guarantees, circumventing the requirement for strict repeated feasibility guarantees. Loss of feasibility is captured by a back-up controller, which is explicitly taken into account in the stability analysis. We illustrate our findings using a numeric example.<|reference_end|> | arxiv | @article{pfefferkorn2024probabilistically,
title={Probabilistically Input-to-State Stable Stochastic Model Predictive
Control},
author={Maik Pfefferkorn, Rolf Findeisen},
journal={arXiv preprint arXiv:2410.08186},
year={2024},
archivePrefix={arXiv},
eprint={2410.08186},
primaryClass={eess.SY cs.SY}
} | pfefferkorn2024probabilistically |
arxiv-668241 | 2410.08187 | Comparing Mass-Preserving Numerical Methods for the Lithium-Ion Battery Single Particle Model | <|reference_start|>Comparing Mass-Preserving Numerical Methods for the Lithium-Ion Battery Single Particle Model: The single particle model (SPM) is a reduced electrochemical model that holds promise for applications in battery management systems due to its ability to accurately capture battery dynamics; however, the numerical discretization of the SPM requires careful consideration to ensure numerical stability and accuracy. In this paper, we present a comparative study of two mass-preserving numerical schemes for the SPM: the finite volume method and the control volume method. Using numerical simulations, we systematically evaluate the performance of these schemes, after independently calibrating the SPM discretized with each scheme to experimental data, and find a tradeoff between accuracy (quantified by voltage root-mean-square error) and computational time. Our findings provide insights into the selection of numerical schemes for the SPM, contributing to the advancement of battery modeling and simulation techniques.<|reference_end|> | arxiv | @article{lucero2024comparing,
title={Comparing Mass-Preserving Numerical Methods for the Lithium-Ion Battery
Single Particle Model},
author={Joseph N. E. Lucero, Le Xu, Simona Onori},
journal={arXiv preprint arXiv:2410.08187},
year={2024},
archivePrefix={arXiv},
eprint={2410.08187},
primaryClass={eess.SY cs.SY}
} | lucero2024comparing |
arxiv-668242 | 2410.08188 | DifFRelight: Diffusion-Based Facial Performance Relighting | <|reference_start|>DifFRelight: Diffusion-Based Facial Performance Relighting: We present a novel framework for free-viewpoint facial performance relighting using diffusion-based image-to-image translation. Leveraging a subject-specific dataset containing diverse facial expressions captured under various lighting conditions, including flat-lit and one-light-at-a-time (OLAT) scenarios, we train a diffusion model for precise lighting control, enabling high-fidelity relit facial images from flat-lit inputs. Our framework includes spatially-aligned conditioning of flat-lit captures and random noise, along with integrated lighting information for global control, utilizing prior knowledge from the pre-trained Stable Diffusion model. This model is then applied to dynamic facial performances captured in a consistent flat-lit environment and reconstructed for novel-view synthesis using a scalable dynamic 3D Gaussian Splatting method to maintain quality and consistency in the relit results. In addition, we introduce unified lighting control by integrating a novel area lighting representation with directional lighting, allowing for joint adjustments in light size and direction. We also enable high dynamic range imaging (HDRI) composition using multiple directional lights to produce dynamic sequences under complex lighting conditions. Our evaluations demonstrate the model's efficiency in achieving precise lighting control and generalizing across various facial expressions while preserving detailed features such as skin texture and hair. The model accurately reproduces complex lighting effects like eye reflections, subsurface scattering, self-shadowing, and translucency, advancing photorealism within our framework.<|reference_end|> | arxiv | @article{he2024diffrelight:,
title={DifFRelight: Diffusion-Based Facial Performance Relighting},
author={Mingming He, Pascal Clausen, Ahmet Levent Ta\c{s}el, Li Ma, Oliver
Pilarski, Wenqi Xian, Laszlo Rikker, Xueming Yu, Ryan Burgert, Ning Yu, Paul
Debevec},
journal={arXiv preprint arXiv:2410.08188},
year={2024},
doi={10.1145/3680528.3687644},
archivePrefix={arXiv},
eprint={2410.08188},
primaryClass={cs.CV cs.AI cs.GR}
} | he2024diffrelight: |
arxiv-668243 | 2410.08189 | SG-Nav: Online 3D Scene Graph Prompting for LLM-based Zero-shot Object Navigation | <|reference_start|>SG-Nav: Online 3D Scene Graph Prompting for LLM-based Zero-shot Object Navigation: In this paper, we propose a new framework for zero-shot object navigation. Existing zero-shot object navigation methods prompt LLM with the text of spatially closed objects, which lacks enough scene context for in-depth reasoning. To better preserve the information of environment and fully exploit the reasoning ability of LLM, we propose to represent the observed scene with 3D scene graph. The scene graph encodes the relationships between objects, groups and rooms with a LLM-friendly structure, for which we design a hierarchical chain-of-thought prompt to help LLM reason the goal location according to scene context by traversing the nodes and edges. Moreover, benefit from the scene graph representation, we further design a re-perception mechanism to empower the object navigation framework with the ability to correct perception error. We conduct extensive experiments on MP3D, HM3D and RoboTHOR environments, where SG-Nav surpasses previous state-of-the-art zero-shot methods by more than 10% SR on all benchmarks, while the decision process is explainable. To the best of our knowledge, SG-Nav is the first zero-shot method that achieves even higher performance than supervised object navigation methods on the challenging MP3D benchmark.<|reference_end|> | arxiv | @article{yin2024sg-nav:,
title={SG-Nav: Online 3D Scene Graph Prompting for LLM-based Zero-shot Object
Navigation},
author={Hang Yin and Xiuwei Xu and Zhenyu Wu and Jie Zhou and Jiwen Lu},
journal={arXiv preprint arXiv:2410.08189},
year={2024},
archivePrefix={arXiv},
eprint={2410.08189},
primaryClass={cs.CV cs.RO}
} | yin2024sg-nav: |
arxiv-668244 | 2410.08190 | Poison-splat: Computation Cost Attack on 3D Gaussian Splatting | <|reference_start|>Poison-splat: Computation Cost Attack on 3D Gaussian Splatting: 3D Gaussian splatting (3DGS), known for its groundbreaking performance and efficiency, has become a dominant 3D representation and brought progress to many 3D vision tasks. However, in this work, we reveal a significant security vulnerability that has been largely overlooked in 3DGS: the computation cost of training 3DGS could be maliciously tampered by poisoning the input data. By developing an attack named Poison-splat, we reveal a novel attack surface where the adversary can poison the input images to drastically increase the computation memory and time needed for 3DGS training, pushing the algorithm towards its worst computation complexity. In extreme cases, the attack can even consume all allocable memory, leading to a Denial-of-Service (DoS) that disrupts servers, resulting in practical damages to real-world 3DGS service vendors. Such a computation cost attack is achieved by addressing a bi-level optimization problem through three tailored strategies: attack objective approximation, proxy model rendering, and optional constrained optimization. These strategies not only ensure the effectiveness of our attack but also make it difficult to defend with simple defensive measures. We hope the revelation of this novel attack surface can spark attention to this crucial yet overlooked vulnerability of 3DGS systems.<|reference_end|> | arxiv | @article{lu2024poison-splat:,
title={Poison-splat: Computation Cost Attack on 3D Gaussian Splatting},
author={Jiahao Lu, Yifan Zhang, Qiuhong Shen, Xinchao Wang, Shuicheng Yan},
journal={arXiv preprint arXiv:2410.08190},
year={2024},
archivePrefix={arXiv},
eprint={2410.08190},
primaryClass={cs.CV cs.CR cs.GR cs.LG}
} | lu2024poison-splat: |
arxiv-668245 | 2410.08192 | HybridBooth: Hybrid Prompt Inversion for Efficient Subject-Driven Generation | <|reference_start|>HybridBooth: Hybrid Prompt Inversion for Efficient Subject-Driven Generation: Recent advancements in text-to-image diffusion models have shown remarkable creative capabilities with textual prompts, but generating personalized instances based on specific subjects, known as subject-driven generation, remains challenging. To tackle this issue, we present a new hybrid framework called HybridBooth, which merges the benefits of optimization-based and direct-regression methods. HybridBooth operates in two stages: the Word Embedding Probe, which generates a robust initial word embedding using a fine-tuned encoder, and the Word Embedding Refinement, which further adapts the encoder to specific subject images by optimizing key parameters. This approach allows for effective and fast inversion of visual concepts into textual embedding, even from a single image, while maintaining the model's generalization capabilities.<|reference_end|> | arxiv | @article{guan2024hybridbooth:,
title={HybridBooth: Hybrid Prompt Inversion for Efficient Subject-Driven
Generation},
author={Shanyan Guan, Yanhao Ge, Ying Tai, Jian Yang, Wei Li, Mingyu You},
journal={arXiv preprint arXiv:2410.08192},
year={2024},
archivePrefix={arXiv},
eprint={2410.08192},
primaryClass={cs.CV}
} | guan2024hybridbooth: |
arxiv-668246 | 2410.08193 | GenARM: Reward Guided Generation with Autoregressive Reward Model for Test-time Alignment | <|reference_start|>GenARM: Reward Guided Generation with Autoregressive Reward Model for Test-time Alignment: Large Language Models (LLMs) exhibit impressive capabilities but require careful alignment with human preferences. Traditional training-time methods finetune LLMs using human preference datasets but incur significant training costs and require repeated training to handle diverse user preferences. Test-time alignment methods address this by using reward models (RMs) to guide frozen LLMs without retraining. However, existing test-time approaches rely on trajectory-level RMs which are designed to evaluate complete responses, making them unsuitable for autoregressive text generation that requires computing next-token rewards from partial responses. To address this, we introduce GenARM, a test-time alignment approach that leverages the Autoregressive Reward Model--a novel reward parametrization designed to predict next-token rewards for efficient and effective autoregressive generation. Theoretically, we demonstrate that this parametrization can provably guide frozen LLMs toward any distribution achievable by traditional RMs within the KL-regularized reinforcement learning framework. Experimental results show that GenARM significantly outperforms prior test-time alignment baselines and matches the performance of training-time methods. Additionally, GenARM enables efficient weak-to-strong guidance, aligning larger LLMs with smaller RMs without the high costs of training larger models. Furthermore, GenARM supports multi-objective alignment, allowing real-time trade-offs between preference dimensions and catering to diverse user preferences without retraining.<|reference_end|> | arxiv | @article{xu2024genarm:,
title={GenARM: Reward Guided Generation with Autoregressive Reward Model for
Test-time Alignment},
author={Yuancheng Xu, Udari Madhushani Sehwag, Alec Koppel, Sicheng Zhu, Bang
An, Furong Huang, Sumitra Ganesh},
journal={arXiv preprint arXiv:2410.08193},
year={2024},
archivePrefix={arXiv},
eprint={2410.08193},
primaryClass={cs.CL}
} | xu2024genarm: |
arxiv-668247 | 2410.08194 | Features are fate: a theory of transfer learning in high-dimensional regression | <|reference_start|>Features are fate: a theory of transfer learning in high-dimensional regression: With the emergence of large-scale pre-trained neural networks, methods to adapt such "foundation" models to data-limited downstream tasks have become a necessity. Fine-tuning, preference optimization, and transfer learning have all been successfully employed for these purposes when the target task closely resembles the source task, but a precise theoretical understanding of "task similarity" is still lacking. While conventional wisdom suggests that simple measures of similarity between source and target distributions, such as $\phi$-divergences or integral probability metrics, can directly predict the success of transfer, we prove the surprising fact that, in general, this is not the case. We adopt, instead, a feature-centric viewpoint on transfer learning and establish a number of theoretical results that demonstrate that when the target task is well represented by the feature space of the pre-trained model, transfer learning outperforms training from scratch. We study deep linear networks as a minimal model of transfer learning in which we can analytically characterize the transferability phase diagram as a function of the target dataset size and the feature space overlap. For this model, we establish rigorously that when the feature space overlap between the source and target tasks is sufficiently strong, both linear transfer and fine-tuning improve performance, especially in the low data limit. These results build on an emerging understanding of feature learning dynamics in deep linear networks, and we demonstrate numerically that the rigorous results we derive for the linear case also apply to nonlinear networks.<|reference_end|> | arxiv | @article{tahir2024features,
title={Features are fate: a theory of transfer learning in high-dimensional
regression},
author={Javan Tahir, Surya Ganguli, Grant M. Rotskoff},
journal={arXiv preprint arXiv:2410.08194},
year={2024},
archivePrefix={arXiv},
eprint={2410.08194},
primaryClass={stat.ML cs.LG}
} | tahir2024features |
arxiv-668248 | 2410.08196 | MathCoder2: Better Math Reasoning from Continued Pretraining on Model-translated Mathematical Code | <|reference_start|>MathCoder2: Better Math Reasoning from Continued Pretraining on Model-translated Mathematical Code: Code has been shown to be effective in enhancing the mathematical reasoning abilities of large language models due to its precision and accuracy. Previous works involving continued mathematical pretraining often include code that utilizes math-related packages, which are primarily designed for fields such as engineering, machine learning, signal processing, or module testing, rather than being directly focused on mathematical reasoning. In this paper, we introduce a novel method for generating mathematical code accompanied with corresponding reasoning steps for continued pretraining. Our approach begins with the construction of a high-quality mathematical continued pretraining dataset by incorporating math-related web data, code using mathematical packages, math textbooks, and synthetic data. Next, we construct reasoning steps by extracting LaTeX expressions, the conditions needed for the expressions, and the results of the expressions from the previously collected dataset. Based on this extracted information, we generate corresponding code to accurately capture the mathematical reasoning process. Appending the generated code to each reasoning step results in data consisting of paired natural language reasoning steps and their corresponding code. Combining this data with the original dataset results in a 19.2B-token high-performing mathematical pretraining corpus, which we name MathCode-Pile. Training several popular base models with this corpus significantly improves their mathematical abilities, leading to the creation of the MathCoder2 family of models. All of our data processing and training code is open-sourced, ensuring full transparency and easy reproducibility of the entire data collection and training pipeline. The code is released at https://github.com/mathllm/MathCoder2 .<|reference_end|> | arxiv | @article{lu2024mathcoder2:,
title={MathCoder2: Better Math Reasoning from Continued Pretraining on
Model-translated Mathematical Code},
author={Zimu Lu, Aojun Zhou, Ke Wang, Houxing Ren, Weikang Shi, Junting Pan,
Mingjie Zhan, Hongsheng Li},
journal={arXiv preprint arXiv:2410.08196},
year={2024},
archivePrefix={arXiv},
eprint={2410.08196},
primaryClass={cs.CL cs.AI cs.CV}
} | lu2024mathcoder2: |
arxiv-668249 | 2410.08197 | From Exploration to Mastery: Enabling LLMs to Master Tools via Self-Driven Interactions | <|reference_start|>From Exploration to Mastery: Enabling LLMs to Master Tools via Self-Driven Interactions: Tool learning enables Large Language Models (LLMs) to interact with external environments by invoking tools, serving as an effective strategy to mitigate the limitations inherent in their pre-training data. In this process, tool documentation plays a crucial role by providing usage instructions for LLMs, thereby facilitating effective tool utilization. This paper concentrates on the critical challenge of bridging the comprehension gap between LLMs and external tools due to the inadequacies and inaccuracies inherent in existing human-centric tool documentation. We propose a novel framework, DRAFT, aimed at Dynamically Refining tool documentation through the Analysis of Feedback and Trails emanating from LLMs' interactions with external tools. This methodology pivots on an innovative trial-and-error approach, consisting of three distinct learning phases: experience gathering, learning from experience, and documentation rewriting, to iteratively enhance the tool documentation. This process is further optimized by implementing a diversity-promoting exploration strategy to ensure explorative diversity and a tool-adaptive termination mechanism to prevent overfitting while enhancing efficiency. Extensive experiments on multiple datasets demonstrate that DRAFT's iterative, feedback-based refinement significantly ameliorates documentation quality, fostering a deeper comprehension and more effective utilization of tools by LLMs. Notably, our analysis reveals that the tool documentation refined via our approach demonstrates robust cross-model generalization capabilities.<|reference_end|> | arxiv | @article{qu2024from,
title={From Exploration to Mastery: Enabling LLMs to Master Tools via
Self-Driven Interactions},
author={Changle Qu, Sunhao Dai, Xiaochi Wei, Hengyi Cai, Shuaiqiang Wang,
Dawei Yin, Jun Xu, Ji-Rong Wen},
journal={arXiv preprint arXiv:2410.08197},
year={2024},
archivePrefix={arXiv},
eprint={2410.08197},
primaryClass={cs.CL cs.AI}
} | qu2024from |
arxiv-668250 | 2410.08198 | Adam Exploits $\ell_\infty$-geometry of Loss Landscape via Coordinate-wise Adaptivity | <|reference_start|>Adam Exploits $\ell_\infty$-geometry of Loss Landscape via Coordinate-wise Adaptivity: Adam outperforms SGD when training language models. Yet this advantage is not well-understood theoretically -- previous convergence analysis for Adam and SGD mainly focuses on the number of steps $T$ and is already minimax-optimal in non-convex cases, which are both $\widetilde{O}(T^{-1/4})$. In this work, we argue that the exploitation of nice $\ell_\infty$-geometry is the key advantage of Adam over SGD. More specifically, we give a new convergence analysis for Adam under novel assumptions that loss is smooth under $\ell_\infty$-geometry rather than the more common $\ell_2$-geometry, which yields a much better empirical smoothness constant for GPT-2 and ResNet models. Our experiments confirm that Adam performs much worse when the favorable $\ell_\infty$-geometry is changed while SGD provably remains unaffected. We also extend the convergence analysis to blockwise Adam under novel blockwise smoothness assumptions.<|reference_end|> | arxiv | @article{xie2024adam,
title={Adam Exploits $\ell_\infty$-geometry of Loss Landscape via
Coordinate-wise Adaptivity},
author={Shuo Xie, Mohamad Amin Mohamadi, Zhiyuan Li},
journal={arXiv preprint arXiv:2410.08198},
year={2024},
archivePrefix={arXiv},
eprint={2410.08198},
primaryClass={cs.LG}
} | xie2024adam |
arxiv-668251 | 2410.08201 | Efficient Dictionary Learning with Switch Sparse Autoencoders | <|reference_start|>Efficient Dictionary Learning with Switch Sparse Autoencoders: Sparse autoencoders (SAEs) are a recent technique for decomposing neural network activations into human-interpretable features. However, in order for SAEs to identify all features represented in frontier models, it will be necessary to scale them up to very high width, posing a computational challenge. In this work, we introduce Switch Sparse Autoencoders, a novel SAE architecture aimed at reducing the compute cost of training SAEs. Inspired by sparse mixture of experts models, Switch SAEs route activation vectors between smaller "expert" SAEs, enabling SAEs to efficiently scale to many more features. We present experiments comparing Switch SAEs with other SAE architectures, and find that Switch SAEs deliver a substantial Pareto improvement in the reconstruction vs. sparsity frontier for a given fixed training compute budget. We also study the geometry of features across experts, analyze features duplicated across experts, and verify that Switch SAE features are as interpretable as features found by other SAE architectures.<|reference_end|> | arxiv | @article{mudide2024efficient,
title={Efficient Dictionary Learning with Switch Sparse Autoencoders},
author={Anish Mudide and Joshua Engels and Eric J. Michaud and Max Tegmark and
Christian Schroeder de Witt},
journal={arXiv preprint arXiv:2410.08201},
year={2024},
archivePrefix={arXiv},
eprint={2410.08201},
primaryClass={cs.LG}
} | mudide2024efficient |
arxiv-668252 | 2410.08202 | Mono-InternVL: Pushing the Boundaries of Monolithic Multimodal Large Language Models with Endogenous Visual Pre-training | <|reference_start|>Mono-InternVL: Pushing the Boundaries of Monolithic Multimodal Large Language Models with Endogenous Visual Pre-training: The rapid advancement of Large Language Models (LLMs) has led to an influx of efforts to extend their capabilities to multimodal tasks. Among them, growing attention has been focused on monolithic Multimodal Large Language Models (MLLMs) that integrate visual encoding and language decoding into a single LLM. Despite the structural simplicity and deployment-friendliness, training a monolithic MLLM with promising performance still remains challenging. In particular, the popular approaches adopt continuous pre-training to extend a pre-trained LLM to a monolithic MLLM, which suffers from catastrophic forgetting and leads to performance degeneration. In this paper, we aim to overcome this limitation from the perspective of delta tuning. Specifically, our core idea is to embed visual parameters into a pre-trained LLM, thereby incrementally learning visual knowledge from massive data via delta tuning, i.e., freezing the LLM when optimizing the visual parameters. Based on this principle, we present Mono-InternVL, a novel monolithic MLLM that seamlessly integrates a set of visual experts via a multimodal mixture-of-experts structure. Moreover, we propose an innovative pre-training strategy to maximize the visual capability of Mono-InternVL, namely Endogenous Visual Pre-training (EViP). In particular, EViP is designed as a progressive learning process for visual experts, which aims to fully exploit the visual knowledge from noisy data to high-quality data. To validate our approach, we conduct extensive experiments on 16 benchmarks. Experimental results not only validate the superior performance of Mono-InternVL compared to the state-of-the-art MLLM on 6 multimodal benchmarks, e.g., +113 points over InternVL-1.5 on OCRBench, but also confirm its better deployment efficiency, with first token latency reduced by up to 67%.<|reference_end|> | arxiv | @article{luo2024mono-internvl:,
title={Mono-InternVL: Pushing the Boundaries of Monolithic Multimodal Large
Language Models with Endogenous Visual Pre-training},
author={Gen Luo, Xue Yang, Wenhan Dou, Zhaokai Wang, Jifeng Dai, Yu Qiao,
Xizhou Zhu},
journal={arXiv preprint arXiv:2410.08202},
year={2024},
archivePrefix={arXiv},
eprint={2410.08202},
primaryClass={cs.CV cs.CL}
} | luo2024mono-internvl: |
arxiv-668253 | 2410.08203 | Complete and bi-continuous invariant of protein backbones under rigid motion | <|reference_start|>Complete and bi-continuous invariant of protein backbones under rigid motion: Proteins are large biomolecules that regulate all living organisms and consist of one or several chains.The primary structure of a protein chain is a sequence of amino acid residues whose three main atoms (alpha-carbon, nitrogen, and carboxyl carbon) form a protein backbone. The tertiary (geometric) structure is the rigid shape of a protein chain represented by atomic positions in a 3-dimensional space. Because different geometric structures often have distinct functional properties, it is important to continuously quantify differences in rigid shapes of protein backbones. Unfortunately, many widely used similarities of proteins fail axioms of a distance metric and discontinuously change under tiny perturbations of atoms. This paper develops a complete invariant under rigid motion, which defines a Lipschitz bi-continuous bijection from all rigid classes of protein backbones to a well-defined invariant space. The new invariant detected thousands of (near-)duplicates in the Protein Data Bank, whose presence inevitably skews machine learning predictions. The resulting invariant space allows low-dimensional maps with analytically defined coordinates that reveal substantial variability in the protein universe.<|reference_end|> | arxiv | @article{anosova2024complete,
title={Complete and bi-continuous invariant of protein backbones under rigid
motion},
author={Olga Anosova, Alexey Gorelov, William Jeffcott, Ziqiu Jiang, Vitaliy
Kurlin},
journal={arXiv preprint arXiv:2410.08203},
year={2024},
archivePrefix={arXiv},
eprint={2410.08203},
primaryClass={cs.CG}
} | anosova2024complete |
arxiv-668254 | 2410.08206 | Interactive4D: Interactive 4D LiDAR Segmentation | <|reference_start|>Interactive4D: Interactive 4D LiDAR Segmentation: Interactive segmentation has an important role in facilitating the annotation process of future LiDAR datasets. Existing approaches sequentially segment individual objects at each LiDAR scan, repeating the process throughout the entire sequence, which is redundant and ineffective. In this work, we propose interactive 4D segmentation, a new paradigm that allows segmenting multiple objects on multiple LiDAR scans simultaneously, and Interactive4D, the first interactive 4D segmentation model that segments multiple objects on superimposed consecutive LiDAR scans in a single iteration by utilizing the sequential nature of LiDAR data. While performing interactive segmentation, our model leverages the entire space-time volume, leading to more efficient segmentation. Operating on the 4D volume, it directly provides consistent instance IDs over time and also simplifies tracking annotations. Moreover, we show that click simulations are crucial for successful model training on LiDAR point clouds. To this end, we design a click simulation strategy that is better suited for the characteristics of LiDAR data. To demonstrate its accuracy and effectiveness, we evaluate Interactive4D on multiple LiDAR datasets, where Interactive4D achieves a new state-of-the-art by a large margin. Upon acceptance, we will publicly release the code and models at https://vision.rwth-aachen.de/Interactive4D.<|reference_end|> | arxiv | @article{fradlin2024interactive4d:,
title={Interactive4D: Interactive 4D LiDAR Segmentation},
author={Ilya Fradlin, Idil Esen Zulfikar, Kadir Yilmaz, Theodora Kontogianni,
Bastian Leibe},
journal={arXiv preprint arXiv:2410.08206},
year={2024},
archivePrefix={arXiv},
eprint={2410.08206},
primaryClass={cs.CV}
} | fradlin2024interactive4d: |
arxiv-668255 | 2410.08207 | DICE: Discrete Inversion Enabling Controllable Editing for Multinomial Diffusion and Masked Generative Models | <|reference_start|>DICE: Discrete Inversion Enabling Controllable Editing for Multinomial Diffusion and Masked Generative Models: Discrete diffusion models have achieved success in tasks like image generation and masked language modeling but face limitations in controlled content editing. We introduce DICE (Discrete Inversion for Controllable Editing), the first approach to enable precise inversion for discrete diffusion models, including multinomial diffusion and masked generative models. By recording noise sequences and masking patterns during the reverse diffusion process, DICE enables accurate reconstruction and flexible editing of discrete data without the need for predefined masks or attention manipulation. We demonstrate the effectiveness of DICE across both image and text domains, evaluating it on models such as VQ-Diffusion, Paella, and RoBERTa. Our results show that DICE preserves high data fidelity while enhancing editing capabilities, offering new opportunities for fine-grained content manipulation in discrete spaces. For project webpage, see https://hexiaoxiao-cs.github.io/DICE/.<|reference_end|> | arxiv | @article{he2024dice:,
title={DICE: Discrete Inversion Enabling Controllable Editing for Multinomial
Diffusion and Masked Generative Models},
author={Xiaoxiao He, Ligong Han, Quan Dao, Song Wen, Minhao Bai, Di Liu, Han
Zhang, Martin Renqiang Min, Felix Juefei-Xu, Chaowei Tan, Bo Liu, Kang Li,
Hongdong Li, Junzhou Huang, Faez Ahmed, Akash Srivastava, Dimitris Metaxas},
journal={arXiv preprint arXiv:2410.08207},
year={2024},
archivePrefix={arXiv},
eprint={2410.08207},
primaryClass={cs.CV cs.LG}
} | he2024dice: |
arxiv-668256 | 2410.08208 | SPA: 3D Spatial-Awareness Enables Effective Embodied Representation | <|reference_start|>SPA: 3D Spatial-Awareness Enables Effective Embodied Representation: In this paper, we introduce SPA, a novel representation learning framework that emphasizes the importance of 3D spatial awareness in embodied AI. Our approach leverages differentiable neural rendering on multi-view images to endow a vanilla Vision Transformer (ViT) with intrinsic spatial understanding. We present the most comprehensive evaluation of embodied representation learning to date, covering 268 tasks across 8 simulators with diverse policies in both single-task and language-conditioned multi-task scenarios. The results are compelling: SPA consistently outperforms more than 10 state-of-the-art representation methods, including those specifically designed for embodied AI, vision-centric tasks, and multi-modal applications, while using less training data. Furthermore, we conduct a series of real-world experiments to confirm its effectiveness in practical scenarios. These results highlight the critical role of 3D spatial awareness for embodied representation learning. Our strongest model takes more than 6000 GPU hours to train and we are committed to open-sourcing all code and model weights to foster future research in embodied representation learning. Project Page: https://haoyizhu.github.io/spa/.<|reference_end|> | arxiv | @article{zhu2024spa:,
title={SPA: 3D Spatial-Awareness Enables Effective Embodied Representation},
author={Haoyi Zhu and Honghui Yang and Yating Wang and Jiange Yang and Limin
Wang and Tong He},
journal={arXiv preprint arXiv:2410.08208},
year={2024},
archivePrefix={arXiv},
eprint={2410.08208},
primaryClass={cs.CV cs.AI cs.LG cs.RO}
} | zhu2024spa: |
arxiv-668257 | 2410.08209 | Emerging Pixel Grounding in Large Multimodal Models Without Grounding Supervision | <|reference_start|>Emerging Pixel Grounding in Large Multimodal Models Without Grounding Supervision: Current large multimodal models (LMMs) face challenges in grounding, which requires the model to relate language components to visual entities. Contrary to the common practice that fine-tunes LMMs with additional grounding supervision, we find that the grounding ability can in fact emerge in LMMs trained without explicit grounding supervision. To reveal this emerging grounding, we introduce an "attend-and-segment" method which leverages attention maps from standard LMMs to perform pixel-level segmentation. Furthermore, to enhance the grounding ability, we propose DIFFLMM, an LMM utilizing a diffusion-based visual encoder, as opposed to the standard CLIP visual encoder, and trained with the same weak supervision. Without being constrained by the biases and limited scale of grounding-specific supervision data, our approach is more generalizable and scalable. We achieve competitive performance on both grounding-specific and general visual question answering benchmarks, compared with grounding LMMs and generalist LMMs, respectively. Notably, we achieve a 44.2 grounding mask recall on grounded conversation generation without any grounding supervision, outperforming the extensively supervised model GLaMM. Project page: https://groundLMM.github.io.<|reference_end|> | arxiv | @article{cao2024emerging,
title={Emerging Pixel Grounding in Large Multimodal Models Without Grounding
Supervision},
author={Shengcao Cao, Liang-Yan Gui, Yu-Xiong Wang},
journal={arXiv preprint arXiv:2410.08209},
year={2024},
archivePrefix={arXiv},
eprint={2410.08209},
primaryClass={cs.CV cs.AI cs.LG}
} | cao2024emerging |
arxiv-668258 | 2410.08210 | PointOBB-v2: Towards Simpler, Faster, and Stronger Single Point Supervised Oriented Object Detection | <|reference_start|>PointOBB-v2: Towards Simpler, Faster, and Stronger Single Point Supervised Oriented Object Detection: Single point supervised oriented object detection has gained attention and made initial progress within the community. Diverse from those approaches relying on one-shot samples or powerful pretrained models (e.g. SAM), PointOBB has shown promise due to its prior-free feature. In this paper, we propose PointOBB-v2, a simpler, faster, and stronger method to generate pseudo rotated boxes from points without relying on any other prior. Specifically, we first generate a Class Probability Map (CPM) by training the network with non-uniform positive and negative sampling. We show that the CPM is able to learn the approximate object regions and their contours. Then, Principal Component Analysis (PCA) is applied to accurately estimate the orientation and the boundary of objects. By further incorporating a separation mechanism, we resolve the confusion caused by the overlapping on the CPM, enabling its operation in high-density scenarios. Extensive comparisons demonstrate that our method achieves a training speed 15.58x faster and an accuracy improvement of 11.60%/25.15%/21.19% on the DOTA-v1.0/v1.5/v2.0 datasets compared to the previous state-of-the-art, PointOBB. This significantly advances the cutting edge of single point supervised oriented detection in the modular track.<|reference_end|> | arxiv | @article{ren2024pointobb-v2:,
title={PointOBB-v2: Towards Simpler, Faster, and Stronger Single Point
Supervised Oriented Object Detection},
author={Botao Ren, Xue Yang, Yi Yu, Junwei Luo, Zhidong Deng},
journal={arXiv preprint arXiv:2410.08210},
year={2024},
archivePrefix={arXiv},
eprint={2410.08210},
primaryClass={cs.CV cs.AI}
} | ren2024pointobb-v2: |
arxiv-668259 | 2410.08211 | LatteCLIP: Unsupervised CLIP Fine-Tuning via LMM-Synthetic Texts | <|reference_start|>LatteCLIP: Unsupervised CLIP Fine-Tuning via LMM-Synthetic Texts: Large-scale vision-language pre-trained (VLP) models (e.g., CLIP) are renowned for their versatility, as they can be applied to diverse applications in a zero-shot setup. However, when these models are used in specific domains, their performance often falls short due to domain gaps or the under-representation of these domains in the training data. While fine-tuning VLP models on custom datasets with human-annotated labels can address this issue, annotating even a small-scale dataset (e.g., 100k samples) can be an expensive endeavor, often requiring expert annotators if the task is complex. To address these challenges, we propose LatteCLIP, an unsupervised method for fine-tuning CLIP models on classification with known class names in custom domains, without relying on human annotations. Our method leverages Large Multimodal Models (LMMs) to generate expressive textual descriptions for both individual images and groups of images. These provide additional contextual information to guide the fine-tuning process in the custom domains. Since LMM-generated descriptions are prone to hallucination or missing details, we introduce a novel strategy to distill only the useful information and stabilize the training. Specifically, we learn rich per-class prototype representations from noisy generated texts and dual pseudo-labels. Our experiments on 10 domain-specific datasets show that LatteCLIP outperforms pre-trained zero-shot methods by an average improvement of +4.74 points in top-1 accuracy and other state-of-the-art unsupervised methods by +3.45 points.<|reference_end|> | arxiv | @article{cao2024latteclip:,
title={LatteCLIP: Unsupervised CLIP Fine-Tuning via LMM-Synthetic Texts},
author={Anh-Quan Cao, Maximilian Jaritz, Matthieu Guillaumin, Raoul de
Charette, Loris Bazzani},
journal={arXiv preprint arXiv:2410.08211},
year={2024},
archivePrefix={arXiv},
eprint={2410.08211},
primaryClass={cs.CV cs.AI cs.CL}
} | cao2024latteclip: |
arxiv-668260 | adap-org/9807003 | Development and Evolution of Neural Networks in an Artificial Chemistry | <|reference_start|>Development and Evolution of Neural Networks in an Artificial Chemistry: We present a model of decentralized growth for Artificial Neural Networks (ANNs) inspired by the development and the physiology of real nervous systems. In this model, each individual artificial neuron is an autonomous unit whose behavior is determined only by the genetic information it harbors and local concentrations of substrates modeled by a simple artificial chemistry. Gene expression is manifested as axon and dendrite growth, cell division and differentiation, substrate production and cell stimulation. We demonstrate the model's power with a hand-written genome that leads to the growth of a simple network which performs classical conditioning. To evolve more complex structures, we implemented a platform-independent, asynchronous, distributed Genetic Algorithm (GA) that allows users to participate in evolutionary experiments via the World Wide Web.<|reference_end|> | arxiv | @article{astor1998development,
title={Development and Evolution of Neural Networks in an Artificial Chemistry},
author={Jens C. Astor and Christoph Adami (Caltech)},
journal={arXiv preprint arXiv:adap-org/9807003},
year={1998},
number={KRL MAP-234},
archivePrefix={arXiv},
eprint={adap-org/9807003},
primaryClass={adap-org cs.NE nlin.AO q-bio.PE}
} | astor1998development |
arxiv-668261 | adap-org/9903003 | Evolution of genetic organization in digital organisms | <|reference_start|>Evolution of genetic organization in digital organisms: We examine the evolution of expression patterns and the organization of genetic information in populations of self-replicating digital organisms. Seeding the experiments with a linearly expressed ancestor, we witness the development of complex, parallel secondary expression patterns. Using principles from information theory, we demonstrate an evolutionary pressure towards overlapping expressions causing variation (and hence further evolution) to sharply drop. Finally, we compare the overlapping sections of dominant genomes to those portions which are singly expressed and observe a significant difference in the entropy of their encoding.<|reference_end|> | arxiv | @article{ofria1999evolution,
title={Evolution of genetic organization in digital organisms},
author={Charles Ofria and Christoph Adami (California Institute of Technology)},
journal={arXiv preprint arXiv:adap-org/9903003},
year={1999},
archivePrefix={arXiv},
eprint={adap-org/9903003},
primaryClass={adap-org cs.NE nlin.AO q-bio.PE}
} | ofria1999evolution |
arxiv-668262 | adap-org/9909006 | Performance of data networks with random links | <|reference_start|>Performance of data networks with random links: We investigate simplified models of computer data networks and examine how the introduction of additional random links influences the performance of these networks. In general, the impact of additional random links on the performance of the network strongly depends on the routing algorithm used in the network. Significant performance gains can be achieved if the routing is based on "geometrical distance" or shortest path reduced table routing. With shortest path full table routing degradation of performance is observed.<|reference_end|> | arxiv | @article{fuks1999performance,
title={Performance of data networks with random links},
author={Henryk Fuks and Anna T. Lawniczak},
journal={Mathematics and Computers in Simulation 51 103-119 (1999)},
year={1999},
doi={10.1016/S0378-4754(99)00125-1},
number={The Fields Institute report FI-PIA1999-012},
archivePrefix={arXiv},
eprint={adap-org/9909006},
primaryClass={adap-org cs.NI nlin.AO}
} | fuks1999performance |
arxiv-668263 | alg-geom/9608018 | Rank Two Bundles on Algebraic Curves and Decoding of Goppa Codes | <|reference_start|>Rank Two Bundles on Algebraic Curves and Decoding of Goppa Codes: We study a connection between two topics: Decoding of Goppa codes arising from an algebraic curve, and rank two extensions of certain line bundles on the curve.<|reference_end|> | arxiv | @article{johnsen1996rank,
title={Rank Two Bundles on Algebraic Curves and Decoding of Goppa Codes},
author={Trygve Johnsen},
journal={arXiv preprint arXiv:alg-geom/9608018},
year={1996},
archivePrefix={arXiv},
eprint={alg-geom/9608018},
primaryClass={alg-geom cs.IT math.AG math.IT}
} | johnsen1996rank |
arxiv-668264 | astro-ph/0005101 | Data Streams from the Low Frequency Instrument On-Board the Planck Satellite: Statistical Analysis and Compression Efficiency | <|reference_start|>Data Streams from the Low Frequency Instrument On-Board the Planck Satellite: Statistical Analysis and Compression Efficiency: The expected data rate produced by the Low Frequency Instrument (LFI) planned to fly on the ESA Planck mission in 2007 is over a factor of 8 larger than the bandwidth allowed by the spacecraft transmission system to download the LFI data. We discuss the application of lossless compression to Planck/LFI data streams in order to reduce the overall data flow. We perform both theoretical analysis and experimental tests using realistically simulated data streams in order to fix the statistical properties of the signal and the maximal compression rate allowed by several lossless compression algorithms. We study the influence of signal composition and of acquisition parameters on the compression rate Cr and develop a semiempirical formalism to account for it. The best performing compressor tested up to now is the arithmetic compression of order 1, designed for optimizing the compression of white noise like signals, which allows an overall compression rate <Cr> = 2.65 +/- 0.02. We find that this result is not improved by other lossless compressors, since the signal is almost white-noise dominated. Lossless compression algorithms alone will not solve the bandwidth problem but need to be combined with other techniques.<|reference_end|> | arxiv | @article{maris2000data,
title={Data Streams from the Low Frequency Instrument On-Board the Planck
Satellite: Statistical Analysis and Compression Efficiency},
author={M. Maris, D. Maino, C. Burigana, F. Pasian},
journal={arXiv preprint arXiv:astro-ph/0005101},
year={2000},
doi={10.1051/aas:2000289},
number={OAT Int. Rep. 71/00 - OAT Pub. Num. 2140},
archivePrefix={arXiv},
eprint={astro-ph/0005101},
primaryClass={astro-ph cs.OH physics.data-an physics.space-ph}
} | maris2000data |
arxiv-668265 | astro-ph/0008307 | Science User Scenarios for a Virtual Observatory Design Reference Mission: Science Requirements for Data Mining | <|reference_start|>Science User Scenarios for a Virtual Observatory Design Reference Mission: Science Requirements for Data Mining: The knowledge discovery potential of the new large astronomical databases is vast. When these are used in conjunction with the rich legacy data archives, the opportunities for scientific discovery multiply rapidly. A Virtual Observatory (VO) framework will enable transparent and efficient access, search, retrieval, and visualization of data across multiple data repositories, which are generally heterogeneous and distributed. Aspects of data mining that apply to a variety of science user scenarios with a VO are reviewed. The development of a VO should address the data mining needs of various astronomical research constituencies. By way of example, two user scenarios are presented which invoke applications and linkages of data across the catalog and image domains in order to address specific astrophysics research problems. These illustrate a subset of the desired capabilities and power of the VO, and as such they represent potential components of a VO Design Reference Mission.<|reference_end|> | arxiv | @article{borne2000science,
title={Science User Scenarios for a Virtual Observatory Design Reference
Mission: Science Requirements for Data Mining},
author={Kirk D. Borne},
journal={arXiv preprint arXiv:astro-ph/0008307},
year={2000},
archivePrefix={arXiv},
eprint={astro-ph/0008307},
primaryClass={astro-ph cs.DB cs.DL cs.IR}
} | borne2000science |
arxiv-668266 | astro-ph/0010583 | Data Mining in Astronomical Databases | <|reference_start|>Data Mining in Astronomical Databases: A Virtual Observatory (VO) will enable transparent and efficient access, search, retrieval, and visualization of data across multiple data repositories, which are generally heterogeneous and distributed. Aspects of data mining that apply to a variety of science user scenarios with a VO are reviewed.<|reference_end|> | arxiv | @article{borne2000data,
title={Data Mining in Astronomical Databases},
author={Kirk D. Borne (1 and 2) ((1) Raytheon Information Technology and
Scientific Services, (2) NASA Goddard Space Flight Center)},
journal={arXiv preprint arXiv:astro-ph/0010583},
year={2000},
doi={10.1007/10849171_88},
archivePrefix={arXiv},
eprint={astro-ph/0010583},
primaryClass={astro-ph cs.DB cs.DL cs.IR}
} | borne2000data |
arxiv-668267 | astro-ph/0107084 | A Multi-Threaded Fast Convolver for Dynamically Parallel Image Filtering | <|reference_start|>A Multi-Threaded Fast Convolver for Dynamically Parallel Image Filtering: 2D convolution is a staple of digital image processing. The advent of large format imagers makes it possible to literally ``pave'' the focal plane of an optical sensor with silicon, which results in very large images that can require a significant amount of computation to process. Filtering of large images via 2D convolutions is often complicated by a variety of effects (e.g., non-uniformities found in wide field of view instruments). This paper describes a fast (FFT based) method for convolving images, which is also well suited to very large images. A parallel version of the method is implemented using a multi-threaded approach, which allows more efficient load balancing and a simpler software architecture. The method has been implemented in a high-level interpreted language (IDL), while also exploiting open standards vector libraries (VSIPL) and open standards parallel directives (OpenMP). The parallel approach and software architecture are generally applicable to a variety of algorithms and have the advantage of enabling users to obtain the convenience of an easy operating environment while also delivering high performance using a fully portable code.<|reference_end|> | arxiv | @article{kepner2001a,
title={A Multi-Threaded Fast Convolver for Dynamically Parallel Image Filtering},
author={Jeremy Kepner (MIT Lincoln Laboratory)},
journal={Journal of Parallel and Distributed Computing, Volume 63, Issue 3,
March 2003, Pages 360-372},
year={2001},
doi={10.1016/S0743-7315(02)00054-0},
archivePrefix={arXiv},
eprint={astro-ph/0107084},
primaryClass={astro-ph cs.DC}
} | kepner2001a |
arxiv-668268 | astro-ph/0112092 | Systolic and Hyper-Systolic Algorithms for the Gravitational N-Body Problem, with an Application to Brownian Motion | <|reference_start|>Systolic and Hyper-Systolic Algorithms for the Gravitational N-Body Problem, with an Application to Brownian Motion: A systolic algorithm rhythmically computes and passes data through a network of processors. We investigate the performance of systolic algorithms for implementing the gravitational N-body problem on distributed-memory computers. Systolic algorithms minimize memory requirements by distributing the particles between processors. We show that the performance of systolic routines can be greatly enhanced by the use of non-blocking communication, which allows particle coordinates to be communicated at the same time that force calculations are being carried out. Hyper-systolic algorithms reduce the communication complexity at the expense of increased memory demands. As an example of an application requiring large N, we use the systolic algorithm to carry out direct-summation simulations using 10^6 particles of the Brownian motion of the supermassive black hole at the center of the Milky Way galaxy. We predict a 3D random velocity of 0.4 km/s for the black hole.<|reference_end|> | arxiv | @article{dorband2001systolic,
title={Systolic and Hyper-Systolic Algorithms for the Gravitational N-Body
Problem, with an Application to Brownian Motion},
author={E. N. Dorband, Marc Hemsendorf and David Merritt (Rutgers University)},
journal={J.Comput.Phys. 185 (2003) 484-511},
year={2001},
doi={10.1016/S0021-9991(02)00067-0},
number={Rutgers Astrophysics Preprint Series No. 327},
archivePrefix={arXiv},
eprint={astro-ph/0112092},
primaryClass={astro-ph cs.DC physics.comp-ph}
} | dorband2001systolic |
arxiv-668269 | astro-ph/0305447 | Fast n-point correlation functions and three-point lensing application | <|reference_start|>Fast n-point correlation functions and three-point lensing application: We present a new algorithm to rapidly compute the two-point (2PCF), three-point (3PCF) and n-point (n-PCF) correlation functions in roughly O(N log N) time for N particles, instead of O(N^n) as required by brute force approaches. The algorithm enables an estimate of the full 3PCF for as many as 10^6 galaxies. This technique exploits node-to-node correlations of a recursive bisectional binary tree. A balanced tree construction minimizes the depth of the tree and the worst case error at each node. The algorithm presented in this paper can be applied to problems with arbitrary geometry. We describe the detailed implementation to compute the two point function and all eight components of the 3PCF for a two-component field, with attention to shear fields generated by gravitational lensing. We also generalize the algorithm to compute the n-point correlation function for a scalar field in k dimensions where n and k are arbitrary positive integers.<|reference_end|> | arxiv | @article{zhang2003fast,
title={Fast n-point correlation functions and three-point lensing application},
author={Lucy Liuxuan Zhang (1), Ue-Li Pen (1) ((1) CITA, University of
Toronto)},
journal={New Astron. 10 (2005) 569-590},
year={2003},
doi={10.1016/j.newast.2005.04.002},
number={CITA-2003-51},
archivePrefix={arXiv},
eprint={astro-ph/0305447},
primaryClass={astro-ph cs.CC cs.DS}
} | zhang2003fast |
arxiv-668270 | astro-ph/0402591 | Evolutionary design of photometric systems and its application to Gaia | <|reference_start|>Evolutionary design of photometric systems and its application to Gaia: Designing a photometric system to best fulfil a set of scientific goals is a complex task, demanding a compromise between conflicting requirements and subject to various constraints. A specific example is the determination of stellar astrophysical parameters (APs) - effective temperature, metallicity etc. - across a wide range of stellar types. I present a novel approach to this problem which makes minimal assumptions about the required filter system. By considering a filter system as a set of free parameters it may be designed by optimizing some figure-of-merit (FoM) with respect to these parameters. In the example considered, the FoM is a measure of how well the filter system can `separate' stars with different APs. This separation is vectorial in nature, in the sense that the local directions of AP variance are preferably mutually orthogonal to avoid AP degeneracy. The optimization is carried out with an evolutionary algorithm, which uses principles of evolutionary biology to search the parameter space. This model, HFD (Heuristic Filter Design), is applied to the design of photometric systems for the Gaia space astrometry mission. The optimized systems show a number of interesting features, not least the persistence of broad, overlapping filters. These HFD systems perform at least as well as other proposed systems for Gaia, although inadequacies remain in all. The principles underlying HFD are quite generic and may be applied to filter design for numerous other projects, such as the search for specific types of objects or photometric redshift determination.<|reference_end|> | arxiv | @article{bailer-jones2004evolutionary,
title={Evolutionary design of photometric systems and its application to Gaia},
author={C.A.L. Bailer-Jones (Carnegie Mellon University, Pittsburgh, PA;
Max-Planck-Institut fuer Astronomie, Heidelberg)},
journal={Astron.Astrophys. 419 (2004) 385-403},
year={2004},
doi={10.1051/0004-6361:20035779},
archivePrefix={arXiv},
eprint={astro-ph/0402591},
primaryClass={astro-ph cs.NE stat.ML}
} | bailer-jones2004evolutionary |
arxiv-668271 | astro-ph/0502164 | Particle Swarm Optimization: An efficient method for tracing periodic orbits in 3D galactic potentials | <|reference_start|>Particle Swarm Optimization: An efficient method for tracing periodic orbits in 3D galactic potentials: We propose Particle Swarm Optimization (PSO) as an alternative method for locating periodic orbits in a three--dimensional (3D) model of barred galaxies. We develop an appropriate scheme that transforms the problem of finding periodic orbits into the problem of detecting global minimizers of a function, which is defined on the Poincar\'{e} Surface of Section (PSS) of the Hamiltonian system. By combining the PSO method with deflection techniques, we succeeded in tracing systematically several periodic orbits of the system. The method succeeded in tracing the initial conditions of periodic orbits in cases where Newton iterative techniques had difficulties. In particular, we found families of 2D and 3D periodic orbits associated with the inner 8:1 to 12:1 resonances, between the radial 4:1 and corotation resonances of our 3D Ferrers bar model. The main advantages of the proposed algorithm are its simplicity, its ability to work using function values solely, and its ability to locate many periodic orbits per run at a given Jacobian constant.<|reference_end|> | arxiv | @article{skokos2005particle,
title={Particle Swarm Optimization: An efficient method for tracing periodic
orbits in 3D galactic potentials},
author={Ch. Skokos, K.E. Parsopoulos, P.A. Patsis, M.N. Vrahatis},
journal={Mon.Not.Roy.Astron.Soc. 359 (2005) 251-260},
year={2005},
doi={10.1111/j.1365-2966.2005.08892.x},
archivePrefix={arXiv},
eprint={astro-ph/0502164},
primaryClass={astro-ph cs.NA cs.NE math.NA nlin.CD}
} | skokos2005particle |
arxiv-668272 | astro-ph/0504006 | Virtual Observatory: From Concept to Implementation | <|reference_start|>Virtual Observatory: From Concept to Implementation: We review the origins of the Virtual Observatory (VO) concept, and the current status of the efforts in this field. VO is the response of the astronomical community to the challenges posed by the modern massive and complex data sets. It is a framework in which information technology is harnessed to organize, maintain, and explore the rich information content of the exponentially growing data sets, and to enable a qualitatively new science to be done with them. VO will become a complete, open, distributed, web-based framework for astronomy of the early 21st century. A number of significant efforts worldwide are now striving to convert this vision into reality. The technological and methodological challenges posed by the information-rich astronomy are also common to many other fields. We see a fundamental change in the way all science is done, driven by the information technology revolution.<|reference_end|> | arxiv | @article{djorgovski2005virtual,
title={Virtual Observatory: From Concept to Implementation},
author={S.G. Djorgovski, R. Williams},
journal={arXiv preprint arXiv:astro-ph/0504006},
year={2005},
archivePrefix={arXiv},
eprint={astro-ph/0504006},
primaryClass={astro-ph cs.CE}
} | djorgovski2005virtual |
arxiv-668273 | astro-ph/0506110 | Galactic Gradients, Postbiological Evolution and the Apparent Failure of SETI | <|reference_start|>Galactic Gradients, Postbiological Evolution and the Apparent Failure of SETI: Motivated by recent developments impacting our view of Fermi's paradox (absence of extraterrestrials and their manifestations from our past light cone), we suggest a reassessment of the problem itself, as well as of strategies employed by SETI projects so far. The need for such reevaluation is fueled not only by the failure of searches thus far, but also by great advances recently made in astrophysics, astrobiology, computer science and future studies, which have remained largely ignored in SETI practice. As an example of the new approach, we consider the effects of the observed metallicity and temperature gradients in the Milky Way on the spatial distribution of hypothetical advanced extraterrestrial intelligent communities. While, obviously, properties of such communities and their sociological and technological preferences are entirely unknown, we assume that (1) they operate in agreement with the known laws of physics, and (2) that at some point they typically become motivated by a meta-principle embodying the central role of information-processing; a prototype of the latter is the recently suggested Intelligence Principle of Steven J. Dick. There are specific conclusions of practical interest to be drawn from coupling of these reasonable assumptions with the astrophysical and astrochemical structure of the Galaxy. In particular, we suggest that the outer regions of the Galactic disk are most likely locations for advanced SETI targets, and that intelligent communities will tend to migrate outward through the Galaxy as their capacities of information-processing increase, for both thermodynamical and astrochemical reasons. This can also be regarded as a possible generalization of the Galactic Habitable Zone, concept currently much investigated in astrobiology.<|reference_end|> | arxiv | @article{cirkovic2005galactic,
title={Galactic Gradients, Postbiological Evolution and the Apparent Failure of
SETI},
author={Milan M. Cirkovic and Robert J. Bradbury},
journal={New Astron. 11 (2006) 628-639},
year={2005},
doi={10.1016/j.newast.2006.04.003},
archivePrefix={arXiv},
eprint={astro-ph/0506110},
primaryClass={astro-ph cs.AI physics.soc-ph}
} | cirkovic2005galactic |
arxiv-668274 | astro-ph/0506308 | Fast directional continuous spherical wavelet transform algorithms | <|reference_start|>Fast directional continuous spherical wavelet transform algorithms: We describe the construction of a spherical wavelet analysis through the inverse stereographic projection of the Euclidean planar wavelet framework, introduced originally by Antoine and Vandergheynst and developed further by Wiaux et al. Fast algorithms for performing the directional continuous wavelet analysis on the unit sphere are presented. The fast directional algorithm, based on the fast spherical convolution algorithm developed by Wandelt and Gorski, provides a saving of O(sqrt(Npix)) over a direct quadrature implementation for Npix pixels on the sphere, and allows one to perform a directional spherical wavelet analysis of a 10^6 pixel map on a personal computer.<|reference_end|> | arxiv | @article{mcewen2005fast,
title={Fast directional continuous spherical wavelet transform algorithms},
author={J. D. McEwen, M. P. Hobson, D. J. Mortlock, A. N. Lasenby},
journal={IEEE Trans.Signal Process. 55 (2007) 520-529},
year={2005},
doi={10.1109/TSP.2006.887148},
archivePrefix={arXiv},
eprint={astro-ph/0506308},
primaryClass={astro-ph cs.IT math.IT}
} | mcewen2005fast |
arxiv-668275 | astro-ph/0510041 | Acceleration of adaptive optics simulations using programmable logic | <|reference_start|>Acceleration of adaptive optics simulations using programmable logic: Numerical Simulation is an essential part of the design and optimisation of astronomical adaptive optics systems. Simulations of adaptive optics are computationally expensive and the problem scales rapidly with telescope aperture size, as the required spatial order of the correcting system increases. Practical realistic simulations of AO systems for extremely large telescopes are beyond the capabilities of all but the largest of modern parallel supercomputers. Here we describe a more cost effective approach through the use of hardware acceleration using field programmable gate arrays. By transferring key parts of the simulation into programmable logic, large increases in computational bandwidth can be expected. We show that the calculation of wavefront sensor image centroids can be accelerated by a factor of four by transferring the algorithm into hardware. Implementing more demanding parts of the adaptive optics simulation in hardware will lead to much greater performance improvements, of up to 1000 times.<|reference_end|> | arxiv | @article{basden2005acceleration,
title={Acceleration of adaptive optics simulations using programmable logic},
author={A. G. Basden, F. Assemat, T. Butterley, D. Geng, C.D. Saunter, R.W.
Wilson},
journal={Mon.Not.Roy.Astron.Soc.364:1413-1418,2005},
year={2005},
doi={10.1111/j.1365-2966.2005.09670.x},
archivePrefix={arXiv},
eprint={astro-ph/0510041},
primaryClass={astro-ph cs.DC}
} | basden2005acceleration |
arxiv-668276 | astro-ph/0510688 | Using the Parallel Virtual Machine for Everyday Analysis | <|reference_start|>Using the Parallel Virtual Machine for Everyday Analysis: A review of the literature reveals that while parallel computing is sometimes employed by astronomers for custom, large-scale calculations, no package fosters the routine application of parallel methods to standard problems in astronomical data analysis. This paper describes our attempt to close that gap by wrapping the Parallel Virtual Machine (PVM) as a scriptable S-Lang module. Using PVM within ISIS, the Interactive Spectral Interpretation System, we've distributed a number of representative calculations over a network of 25+ CPUs to achieve dramatic reductions in execution times. We discuss how the approach applies to a wide class of modeling problems, outline our efforts to make it more transparent for common use, and note its growing importance in the context of the large, multi-wavelength datasets used in modern analysis.<|reference_end|> | arxiv | @article{noble2005using,
title={Using the Parallel Virtual Machine for Everyday Analysis},
author={M.S. Noble, J.C. Houck, J.E. Davis, A. Young, M. Nowak},
journal={arXiv preprint arXiv:astro-ph/0510688},
year={2005},
archivePrefix={arXiv},
eprint={astro-ph/0510688},
primaryClass={astro-ph cs.DC}
} | noble2005using |
arxiv-668277 | astro-ph/0605042 | How accurate are the time delay estimates in gravitational lensing? | <|reference_start|>How accurate are the time delay estimates in gravitational lensing?: We present a novel approach to estimate the time delay between light curves of multiple images in a gravitationally lensed system, based on Kernel methods in the context of machine learning. We perform various experiments with artificially generated irregularly-sampled data sets to study the effect of the various levels of noise and the presence of gaps of various size in the monitoring data. We compare the performance of our method with various other popular methods of estimating the time delay and conclude, from experiments with artificial data, that our method is least vulnerable to missing data and irregular sampling, within reasonable bounds of Gaussian noise. Thereafter, we use our method to determine the time delays between the two images of quasar Q0957+561 from radio monitoring data at 4 cm and 6 cm, and conclude that if only the observations at epochs common to both wavelengths are used, the time delay gives consistent estimates, which can be combined to yield 408\pm 12 days. The full 6 cm dataset, which covers a longer monitoring period, yields a value which is 10% larger, but this can be attributed to differences in sampling and missing data.<|reference_end|> | arxiv | @article{cuevas-tello2006how,
title={How accurate are the time delay estimates in gravitational lensing?},
author={Juan C. Cuevas-Tello (1,3), Peter Tino (1) and Somak Raychaudhury (2)
((1) School of Computer Science, University of Birmingham, UK; (2) School of
Physics & Astronomy, University of Birmingham, UK; (3) University of San Luis
Potosi, Mexico)},
journal={Astron.Astrophys. 454 (2006) 695-706},
year={2006},
doi={10.1051/0004-6361:20054652},
archivePrefix={arXiv},
eprint={astro-ph/0605042},
primaryClass={astro-ph cs.LG}
} | cuevas-tello2006how |
arxiv-668278 | astro-ph/0605514 | An algorithm for solving the pulsar equation | <|reference_start|>An algorithm for solving the pulsar equation: We present an algorithm for finding numerical solutions of the pulsar equation. The problem of finding the solutions was reduced to finding expansion coefficients of the source term of the equation in a basis of orthogonal functions defined on the unit interval by minimizing a multi-variable mismatch function defined on the light cylinder. We applied the algorithm to Scharlemann & Wagoner boundary conditions, by which a smooth solution is reconstructed that by construction successfully passes Gruzinov's test of the source function exponent.<|reference_end|> | arxiv | @article{bratek2006an,
title={An algorithm for solving the pulsar equation},
author={Lukasz Bratek, Marcin Kolonko},
journal={Astrophys.SpaceSci.309:231-234,2007},
year={2006},
doi={10.1007/s10509-007-9406-y},
archivePrefix={arXiv},
eprint={astro-ph/0605514},
primaryClass={astro-ph cs.NA}
} | bratek2006an |
arxiv-668279 | astro-ph/0609159 | A directional continuous wavelet transform on the sphere | <|reference_start|>A directional continuous wavelet transform on the sphere: A new construction of a directional continuous wavelet analysis on the sphere is derived herein. We adopt the harmonic scaling idea for the spherical dilation operator recently proposed by Sanz et al. but extend the analysis to a more general directional framework. Directional wavelets are a powerful extension that allow one to also probe oriented structure in the analysed function. Our spherical wavelet methodology has the advantage that all functions and operators are defined directly on the sphere. The construction of wavelets in our framework is demonstrated with an example.<|reference_end|> | arxiv | @article{mcewen2006a,
title={A directional continuous wavelet transform on the sphere},
author={J. D. McEwen, M. P. Hobson, A. N. Lasenby},
journal={arXiv preprint arXiv:astro-ph/0609159},
year={2006},
archivePrefix={arXiv},
eprint={astro-ph/0609159},
primaryClass={astro-ph cs.IT math.IT}
} | mcewen2006a |
arxiv-668280 | astro-ph/0609794 | The Future of Technical Libraries | <|reference_start|>The Future of Technical Libraries: Technical libraries are currently experiencing very rapid change. In the near future their mission will change, their physical nature will change, and the skills of their employees will change. While some will not be able to make these changes, and will fail, others will lead us into a new era.<|reference_end|> | arxiv | @article{kurtz2006the,
title={The Future of Technical Libraries},
author={Michael J. Kurtz, Guenther Eichhorn, Alberto Accomazzi, Carolyn Grant,
Edwin Henneken, Donna Thompson, Elizabeth Bohlen, and Stephen S. Murray},
journal={arXiv preprint arXiv:astro-ph/0609794},
year={2006},
archivePrefix={arXiv},
eprint={astro-ph/0609794},
primaryClass={astro-ph cs.DL}
} | kurtz2006the |
arxiv-668281 | astro-ph/0612688 | Optimal filters on the sphere | <|reference_start|>Optimal filters on the sphere: We derive optimal filters on the sphere in the context of detecting compact objects embedded in a stochastic background process. The matched filter and the scale adaptive filter are derived on the sphere in the most general setting, allowing for directional template profiles and filters. The performance and relative merits of the two optimal filters are discussed. The application of optimal filter theory on the sphere to the detection of compact objects is demonstrated on simulated mock data. A naive detection strategy is adopted, with an initial aim of illustrating the application of the new optimal filters derived on the sphere. Nevertheless, this simple object detection strategy is demonstrated to perform well, even at a low signal-to-noise ratio. The codes written to compute optimal filters on the sphere (S2FIL), to perform fast directional filtering on the sphere (FastCSWT) and to construct the simulated mock data (COMB) are all made publicly available from http://www.mrao.cam.ac.uk/~jdm57/<|reference_end|> | arxiv | @article{mcewen2006optimal,
title={Optimal filters on the sphere},
author={J. D. McEwen, M. P. Hobson and A. N. Lasenby},
journal={IEEETrans.SignalProcess.56:3813-3823,2008},
year={2006},
doi={10.1109/TSP.2008.923198},
archivePrefix={arXiv},
eprint={astro-ph/0612688},
primaryClass={astro-ph cs.IT math.IT}
} | mcewen2006optimal |
arxiv-668282 | astro-ph/0703485 | Towards Distributed Petascale Computing | <|reference_start|>Towards Distributed Petascale Computing: In this chapter we will argue that studying such multi-scale multi-science systems gives rise to inherently hybrid models containing many different algorithms best serviced by different types of computing environments (ranging from massively parallel computers, via large-scale special purpose machines, to clusters of PC's) whose total integrated computing capacity can easily reach the PFlop/s scale. Such hybrid models, in combination with the by now inherently distributed nature of the data on which the models `feed', suggest a distributed computing model, where parts of the multi-scale multi-science model are executed on the most suitable computing environment, and/or where the computations are carried out close to the required data (i.e. bring the computations to the data instead of the other way around). We present an estimate for the compute requirements to simulate the Galaxy as a typical example of a multi-scale multi-physics application, requiring distributed Petaflop/s computational power.<|reference_end|> | arxiv | @article{hoekstra2007towards,
title={Towards Distributed Petascale Computing},
author={A. G. Hoekstra, S. F. Portegies Zwart, M. Bubak, P. M. A. Sloot},
journal={arXiv preprint arXiv:astro-ph/0703485},
year={2007},
archivePrefix={arXiv},
eprint={astro-ph/0703485},
primaryClass={astro-ph cs.DC}
} | hoekstra2007towards |
arxiv-668283 | astro-ph/9912134 | Interfacing Interpreted and Compiled Languages to Support Applications on a Massively Parallel Network of Workstations (MP-NOW) | <|reference_start|>Interfacing Interpreted and Compiled Languages to Support Applications on a Massively Parallel Network of Workstations (MP-NOW): Astronomers are increasingly using Massively Parallel Network of Workstations (MP-NOW) to address their most challenging computing problems. Fully exploiting these systems is made more difficult as more and more modeling and data analysis software is written in interpreted languages (such as IDL, MATLAB, and Mathematica) which do not lend themselves to parallel computing. We present a specific example of a very simple, but generic solution to this problem. Our example uses an interpreted language (IDL) to set up a calculation and then interfaces with a computational kernel written in a compiled language (C). The IDL code then calls the C code as an external library. We have added to the computational kernel an additional layer, which manages multiple copies of the kernel running on a MP-NOW and returns the results back to the interpreted layer. Our implementation uses The Next generation Taskbag (TNT) library developed at Sarnoff to provide an efficient means for implementing task parallelism. A test problem (taken from Astronomy) has been implemented on the Sarnoff Cyclone computer which consists of 160 heterogeneous nodes connected by a ``fat'' tree 100 Mb/s switched Ethernet running the RedHat Linux and FreeBSD operating systems. Our first results in this ongoing project have demonstrated the feasibility of this approach and produced speedups of greater than 50 on 60 processors.<|reference_end|> | arxiv | @article{kepner1999interfacing,
title={Interfacing Interpreted and Compiled Languages to Support Applications
on a Massively Parallel Network of Workstations (MP-NOW)},
author={Jeremy Kepner (Princeton/MIT LL), Maya Gokhale (Sarnoff/LANL), Ron
Minnich (Sarnoff/LANL), Aaron Marks (Sarnoff) and John DeGood (Sarnoff)},
journal={Cluster Computing, Volume 3, Issue 1, July 2000, pp. 35-44},
year={1999},
doi={10.1023/A:1019011716367},
archivePrefix={arXiv},
eprint={astro-ph/9912134},
primaryClass={astro-ph cs.DC}
} | kepner1999interfacing |
arxiv-668284 | chao-dyn/9905036 | Restart Strategies and Internet Congestion | <|reference_start|>Restart Strategies and Internet Congestion: We recently presented a methodology for quantitatively reducing the risk and cost of executing electronic transactions in a bursty network environment such as the Internet. In the language of portfolio theory, time to complete a transaction and its variance replace the expected return and risk associated with a security, whereas restart times replace combinations of securities. While such a strategy works well with single users, the question remains as to its usefulness when used by many. By using mean field arguments and agent-based simulations, we determine that a restart strategy remains advantageous even if everybody uses it.<|reference_end|> | arxiv | @article{maurer1999restart,
title={Restart Strategies and Internet Congestion},
author={Sebastian M. Maurer and Bernardo A. Huberman},
journal={arXiv preprint arXiv:chao-dyn/9905036},
year={1999},
archivePrefix={arXiv},
eprint={chao-dyn/9905036},
primaryClass={chao-dyn adap-org cs.NI nlin.AO nlin.CD}
} | maurer1999restart |
arxiv-668285 | chao-dyn/9909031 | Noncommutative Martin-Lof randomness : on the concept of a random sequence of qubits | <|reference_start|>Noncommutative Martin-Lof randomness : on the concept of a random sequence of qubits: Martin-Lof's definition of random sequences of cbits as those not belonging to any set of constructive zero Lebesgue measure is reformulated in the language of Algebraic Probability Theory. The adoption of the Pour-El Richards theory of computability structures on Banach spaces allows us to give a natural noncommutative extension of Martin-Lof's definition, characterizing the random elements of a chain Von Neumann algebra. In the particular case of the minimally informative noncommutative alphabet our definition reduces to the definition of a random sequence of qubits.<|reference_end|> | arxiv | @article{segre1999noncommutative,
title={Noncommutative Martin-Lof randomness : on the concept of a random
sequence of qubits},
author={Gavriel Segre},
journal={arXiv preprint arXiv:chao-dyn/9909031},
year={1999},
archivePrefix={arXiv},
eprint={chao-dyn/9909031},
primaryClass={chao-dyn adap-org cs.CC math-ph math.MP nlin.AO nlin.CD quant-ph}
} | segre1999noncommutative |
arxiv-668286 | cmp-lg/9404001 | An Alternative Conception of Tree-Adjoining Derivation | <|reference_start|>An Alternative Conception of Tree-Adjoining Derivation: The precise formulation of derivation for tree-adjoining grammars has important ramifications for a wide variety of uses of the formalism, from syntactic analysis to semantic interpretation and statistical language modeling. We argue that the definition of tree-adjoining derivation must be reformulated in order to manifest the proper linguistic dependencies in derivations. The particular proposal is both precisely characterizable through a definition of TAG derivations as equivalence classes of ordered derivation trees, and computationally operational, by virtue of a compilation to linear indexed grammars together with an efficient algorithm for recognition and parsing according to the compiled grammar.<|reference_end|> | arxiv | @article{schabes1994an,
title={An Alternative Conception of Tree-Adjoining Derivation},
author={Yves Schabes and Stuart M. Shieber},
journal={Computational Linguistics 20(1):91-124},
year={1994},
number={CRCT TR-08-92},
archivePrefix={arXiv},
eprint={cmp-lg/9404001},
primaryClass={cmp-lg cs.CL}
} | schabes1994an |
arxiv-668287 | cmp-lg/9404002 | Lessons from a Restricted Turing Test | <|reference_start|>Lessons from a Restricted Turing Test: We report on the recent Loebner prize competition inspired by Turing's test of intelligent behavior. The presentation covers the structure of the competition and the outcome of its first instantiation in an actual event, and an analysis of the purpose, design, and appropriateness of such a competition. We argue that the competition has no clear purpose, that its design prevents any useful outcome, and that such a competition is inappropriate given the current level of technology. We then speculate as to suitable alternatives to the Loebner prize.<|reference_end|> | arxiv | @article{shieber1994lessons,
title={Lessons from a Restricted Turing Test},
author={Stuart M. Shieber},
journal={arXiv preprint arXiv:cmp-lg/9404002},
year={1994},
number={CRCT TR-19-92},
archivePrefix={arXiv},
eprint={cmp-lg/9404002},
primaryClass={cmp-lg cs.CL}
} | shieber1994lessons |
arxiv-668288 | cmp-lg/9404003 | Restricting the Weak-Generative Capacity of Synchronous Tree-Adjoining Grammars | <|reference_start|>Restricting the Weak-Generative Capacity of Synchronous Tree-Adjoining Grammars: The formalism of synchronous tree-adjoining grammars, a variant of standard tree-adjoining grammars (TAG), was intended to allow the use of TAGs for language transduction in addition to language specification. In previous work, the definition of the transduction relation defined by a synchronous TAG was given by appeal to an iterative rewriting process. The rewriting definition of derivation is problematic in that it greatly extends the expressivity of the formalism and makes the design of parsing algorithms difficult if not impossible. We introduce a simple, natural definition of synchronous tree-adjoining derivation, based on isomorphisms between standard tree-adjoining derivations, that avoids the expressivity and implementability problems of the original rewriting definition. The decrease in expressivity, which would otherwise make the method unusable, is offset by the incorporation of an alternative definition of standard tree-adjoining derivation, previously proposed for completely separate reasons, thereby making it practical to entertain using the natural definition of synchronous derivation. Nonetheless, some remaining problematic cases call for yet more flexibility in the definition; the isomorphism requirement may have to be relaxed. It remains for future research to tune the exact requirements on the allowable mappings.<|reference_end|> | arxiv | @article{shieber1994restricting,
title={Restricting the Weak-Generative Capacity of Synchronous Tree-Adjoining
Grammars},
author={Stuart M. Shieber (Harvard University)},
journal={Computational Intelligence 10(4):371-385, November 1994},
year={1994},
archivePrefix={arXiv},
eprint={cmp-lg/9404003},
primaryClass={cmp-lg cs.CL}
} | shieber1994restricting |
arxiv-668289 | cmp-lg/9404004 | An Empirically Motivated Reinterpretation of Dependency Grammar | <|reference_start|>An Empirically Motivated Reinterpretation of Dependency Grammar: Dependency grammar is usually interpreted as equivalent to a strict form of X--bar theory that forbids the stacking of nodes of the same bar level (e.g., N' immediately dominating N' with the same head). But adequate accounts of _one_--anaphora and of the semantics of multiple modifiers require such stacking and accordingly argue against dependency grammar. Dependency grammar can be salvaged by reinterpreting its claims about phrase structure, so that modifiers map onto binary--branching X--bar trees rather than ``flat'' ones.<|reference_end|> | arxiv | @article{covington1994an,
title={An Empirically Motivated Reinterpretation of Dependency Grammar},
author={Michael A. Covington (University of Georgia)},
journal={arXiv preprint arXiv:cmp-lg/9404004},
year={1994},
number={AI-1994-01 (Artificial Intelligence Programs, U. of Georgia)},
archivePrefix={arXiv},
eprint={cmp-lg/9404004},
primaryClass={cmp-lg cs.CL}
} | covington1994an |
arxiv-668290 | cmp-lg/9404005 | Memoization in Constraint Logic Programming | <|reference_start|>Memoization in Constraint Logic Programming: This paper shows how to apply memoization (caching of subgoals and associated answer substitutions) in a constraint logic programming setting. The research is motivated by the desire to apply constraint logic programming (CLP) to problems in natural language processing that involve (constraint) interleaving or coroutining, such as GB and HPSG parsing.<|reference_end|> | arxiv | @article{johnson1994memoization,
title={Memoization in Constraint Logic Programming},
author={Mark Johnson (Brown University)},
journal={arXiv preprint arXiv:cmp-lg/9404005},
year={1994},
archivePrefix={arXiv},
eprint={cmp-lg/9404005},
primaryClass={cmp-lg cs.CL}
} | johnson1994memoization |
arxiv-668291 | cmp-lg/9404006 | SPANISH 1992 (S92): corpus-based analysis of present-day Spanish for medical purposes | <|reference_start|>SPANISH 1992 (S92): corpus-based analysis of present-day Spanish for medical purposes: S92 research was begun in 1987 to analyze word frequencies in present-day Spanish for making speech pathology evaluation tools. 500 2,000-word samples of children, adolescents and adults' language were input between 1988-1991, calculations done in 1992; statistical and Lewandowski analyses were carried out in 1993.<|reference_end|> | arxiv | @article{chandler-burns1994spanish,
title={SPANISH 1992 (S92): corpus-based analysis of present-day Spanish for
medical purposes},
author={R.M. CHANDLER-BURNS (Medical College, Autonomous University of Nuevo
Leon, Monterrey, Mexico)},
journal={arXiv preprint arXiv:cmp-lg/9404006},
year={1994},
number={RMCB150494},
archivePrefix={arXiv},
eprint={cmp-lg/9404006},
primaryClass={cmp-lg cs.CL}
} | chandler-burns1994spanish |
arxiv-668292 | cmp-lg/9404007 | Constraint-Based Categorial Grammar | <|reference_start|>Constraint-Based Categorial Grammar: We propose a generalization of Categorial Grammar in which lexical categories are defined by means of recursive constraints. In particular, the introduction of relational constraints allows one to capture the effects of (recursive) lexical rules in a computationally attractive manner. We illustrate the linguistic merits of the new approach by showing how it accounts for the syntax of Dutch cross-serial dependencies and the position and scope of adjuncts in such constructions. Delayed evaluation is used to process grammars containing recursive constraints.<|reference_end|> | arxiv | @article{bouma1994constraint-based,
title={Constraint-Based Categorial Grammar},
author={Gosse Bouma (Alfa-Informatica, Rijksuniversiteit Groningen), Gertjan
van Noord (Alfa-Informatica, Rijksuniversiteit Groningen)},
journal={Proceedings ACL 94},
year={1994},
archivePrefix={arXiv},
eprint={cmp-lg/9404007},
primaryClass={cmp-lg cs.CL}
} | bouma1994constraint-based |
arxiv-668293 | cmp-lg/9404008 | Principles and Implementation of Deductive Parsing | <|reference_start|>Principles and Implementation of Deductive Parsing: We present a system for generating parsers based directly on the metaphor of parsing as deduction. Parsing algorithms can be represented directly as deduction systems, and a single deduction engine can interpret such deduction systems so as to implement the corresponding parser. The method generalizes easily to parsers for augmented phrase structure formalisms, such as definite-clause grammars and other logic grammar formalisms, and has been used for rapid prototyping of parsing algorithms for a variety of formalisms including variants of tree-adjoining grammars, categorial grammars, and lexicalized context-free grammars.<|reference_end|> | arxiv | @article{shieber1994principles,
title={Principles and Implementation of Deductive Parsing},
author={Stuart M. Shieber (Harvard University), Yves Schabes (Mitsubishi
Electric Research Laboratories), and Fernando C. N. Pereira (AT&T Bell
Laboratories)},
journal={arXiv preprint arXiv:cmp-lg/9404008},
year={1994},
number={CRCT TR-11-94 (Computer Science Department, Harvard University)},
archivePrefix={arXiv},
eprint={cmp-lg/9404008},
primaryClass={cmp-lg cs.CL}
} | shieber1994principles |
arxiv-668294 | cmp-lg/9404009 | A Deductive Account of Quantification in LFG | <|reference_start|>A Deductive Account of Quantification in LFG: The relationship between Lexical-Functional Grammar (LFG) functional structures (f-structures) for sentences and their semantic interpretations can be expressed directly in a fragment of linear logic in a way that explains correctly the constrained interactions between quantifier scope ambiguity and bound anaphora. The use of a deductive framework to account for the compositional properties of quantifying expressions in natural language obviates the need for additional mechanisms, such as Cooper storage, to represent the different scopes that a quantifier might take. Instead, the semantic contribution of a quantifier is recorded as an ordinary logical formula, one whose use in a proof will establish the scope of the quantifier. The properties of linear logic ensure that each quantifier is scoped exactly once. Our analysis of quantifier scope can be seen as a recasting of Pereira's analysis (Pereira, 1991), which was expressed in higher-order intuitionistic logic. But our use of LFG and linear logic provides a much more direct and computationally more flexible interpretation mechanism for at least the same range of phenomena. We have developed a preliminary Prolog implementation of the linear deductions described in this work.<|reference_end|> | arxiv | @article{dalrymple1994a,
title={A Deductive Account of Quantification in LFG},
author={Mary Dalrymple (Xerox PARC, Palo Alto CA), John Lamping (Xerox PARC,
Palo Alto CA), Fernando Pereira (AT&T Bell Laboratories, Murray Hill NJ),
Vijay Saraswat (Xerox PARC, Palo Alto CA)},
journal={arXiv preprint arXiv:cmp-lg/9404009},
year={1994},
number={ISTL-NLTT-1993-06-01},
archivePrefix={arXiv},
eprint={cmp-lg/9404009},
primaryClass={cmp-lg cs.CL}
} | dalrymple1994a |
arxiv-668295 | cmp-lg/9404010 | Intensional Verbs Without Type-Raising or Lexical Ambiguity | <|reference_start|>Intensional Verbs Without Type-Raising or Lexical Ambiguity: We present an analysis of the semantic interpretation of intensional verbs such as seek that allows them to take direct objects of either individual or quantifier type, producing both de dicto and de re readings in the quantifier case, all without needing to stipulate type-raising or quantifying-in rules. This simple account follows directly from our use of logical deduction in linear logic to express the relationship between syntactic structures and meanings. While our analysis resembles current categorial approaches in important ways, it differs from them in allowing the greater type flexibility of categorial semantics while maintaining a precise connection to syntax. As a result, we are able to provide derivations for certain readings of sentences with intensional verbs and complex direct objects that are not derivable in current purely categorial accounts of the syntax-semantics interface. The analysis forms a part of our ongoing work on semantic interpretation within the framework of Lexical-Functional Grammar.<|reference_end|> | arxiv | @article{dalrymple1994intensional,
title={Intensional Verbs Without Type-Raising or Lexical Ambiguity},
author={Mary Dalrymple (Xerox PARC, Palo Alto CA), John Lamping (Xerox PARC,
Palo Alto CA), Fernando Pereira (AT&T Bell Laboratories, Murray Hill NJ),
Vijay Saraswat (Xerox PARC, Palo Alto CA)},
journal={arXiv preprint arXiv:cmp-lg/9404010},
year={1994},
number={ISTL-NLTT-1994-02-01},
archivePrefix={arXiv},
eprint={cmp-lg/9404010},
primaryClass={cmp-lg cs.CL}
} | dalrymple1994intensional |
arxiv-668296 | cmp-lg/9404011 | Adjuncts and the Processing of Lexical Rules | <|reference_start|>Adjuncts and the Processing of Lexical Rules: The standard HPSG analysis of Germanic verb clusters can not explain the observed narrow-scope readings of adjuncts in such verb clusters. We present an extension of the HPSG analysis that accounts for the systematic ambiguity of the scope of adjuncts in verb cluster constructions, by treating adjuncts as members of the subcat list. The extension uses powerful recursive lexical rules, implemented as complex constraints. We show how `delayed evaluation' techniques from constraint-logic programming can be used to process such lexical rules.<|reference_end|> | arxiv | @article{van noord1994adjuncts,
title={Adjuncts and the Processing of Lexical Rules},
author={Gertjan van Noord (BCN RUG Groningen), Gosse Bouma (BCN RUG Groningen)},
journal={Proceedings of Coling 1994 Kyoto},
year={1994},
archivePrefix={arXiv},
eprint={cmp-lg/9404011},
primaryClass={cmp-lg cs.CL}
} | van noord1994adjuncts |
arxiv-668297 | cmp-lg/9405001 | Similarity-Based Estimation of Word Cooccurrence Probabilities | <|reference_start|>Similarity-Based Estimation of Word Cooccurrence Probabilities: In many applications of natural language processing it is necessary to determine the likelihood of a given word combination. For example, a speech recognizer may need to determine which of the two word combinations ``eat a peach'' and ``eat a beach'' is more likely. Statistical NLP methods determine the likelihood of a word combination according to its frequency in a training corpus. However, the nature of language is such that many word combinations are infrequent and do not occur in a given corpus. In this work we propose a method for estimating the probability of such previously unseen word combinations using available information on ``most similar'' words. We describe a probabilistic word association model based on distributional word similarity, and apply it to improving probability estimates for unseen word bigrams in a variant of Katz's back-off model. The similarity-based method yields a 20% perplexity improvement in the prediction of unseen bigrams and statistically significant reductions in speech-recognition error.<|reference_end|> | arxiv | @article{dagan1994similarity-based,
title={Similarity-Based Estimation of Word Cooccurrence Probabilities},
author={Ido Dagan (AT&T Bell Laboratories, Murray Hill, NJ 07974, USA),
Fernando Pereira (AT&T Bell Laboratories, Murray Hill, NJ 07974, USA),
Lillian Lee (DAS, Harvard University, Cambridge MA 02138, USA)},
journal={arXiv preprint arXiv:cmp-lg/9405001},
year={1994},
archivePrefix={arXiv},
eprint={cmp-lg/9405001},
primaryClass={cmp-lg cs.CL}
} | dagan1994similarity-based |
arxiv-668298 | cmp-lg/9405002 | Temporal Relations: Reference or Discourse Coherence? | <|reference_start|>Temporal Relations: Reference or Discourse Coherence?: The temporal relations that hold between events described by successive utterances are often left implicit or underspecified. We address the role of two phenomena with respect to the recovery of these relations: (1) the referential properties of tense, and (2) the role of temporal constraints imposed by coherence relations. We account for several facets of the identification of temporal relations through an integration of these.<|reference_end|> | arxiv | @article{kehler1994temporal,
title={Temporal Relations: Reference or Discourse Coherence?},
author={Andrew Kehler (Harvard University)},
journal={ACL-94 (Student Session), Las Cruces, New Mexico},
year={1994},
archivePrefix={arXiv},
eprint={cmp-lg/9405002},
primaryClass={cmp-lg cs.CL}
} | kehler1994temporal |
arxiv-668299 | cmp-lg/9405003 | Some Bibliographical References on Intonation and Intonational Meaning | <|reference_start|>Some Bibliographical References on Intonation and Intonational Meaning: A by-no-means-complete collection of references for those interested in intonational meaning, with other miscellaneous references on intonation included. Additional references are welcome, and should be sent to [email protected].<|reference_end|> | arxiv | @article{hirschberg1994some,
title={Some Bibliographical References on Intonation and Intonational Meaning},
author={Julia Hirschberg (AT&T Bell Laboratories)},
journal={arXiv preprint arXiv:cmp-lg/9405003},
year={1994},
archivePrefix={arXiv},
eprint={cmp-lg/9405003},
primaryClass={cmp-lg cs.CL}
} | hirschberg1994some |
arxiv-668300 | cmp-lg/9405004 | Syntactic-Head-Driven Generation | <|reference_start|>Syntactic-Head-Driven Generation: The previously proposed semantic-head-driven generation methods run into problems if none of the daughter constituents in the syntacto-semantic rule schemata of a grammar fits the definition of a semantic head given in Shieber et al. 1990. This is the case for the semantic analysis rules of certain constraint-based semantic representations, e.g. Underspecified Discourse Representation Structures (UDRSs) (Frank/Reyle 1992). Since head-driven generation in general has its merits, we simply return to a syntactic definition of `head' and demonstrate the feasibility of syntactic-head-driven generation. In addition to its generality, a syntactic-head-driven algorithm provides a basis for a logically well-defined treatment of the movement of (syntactic) heads, for which only ad-hoc solutions existed, so far.<|reference_end|> | arxiv | @article{koenig1994syntactic-head-driven,
title={Syntactic-Head-Driven Generation},
author={Esther Koenig (Institute for Computational Linguistics, Stuttgart
University)},
journal={arXiv preprint arXiv:cmp-lg/9405004},
year={1994},
archivePrefix={arXiv},
eprint={cmp-lg/9405004},
primaryClass={cmp-lg cs.CL}
} | koenig1994syntactic-head-driven |