diff --git a/_bibliography/papers.bib b/_bibliography/papers.bib
index fc38117..1276050 100644
--- a/_bibliography/papers.bib
+++ b/_bibliography/papers.bib
@@ -3,6 +3,22 @@
@string{aps = {American Physical Society,}}
+@inproceedings{zhang-etal-2025-test,
+ abbr={EMNLP},
+ bibtex_show={true},
+  title={Test-Time Steering for Lossless Text Compression via Weighted Product of Experts},
+  author={Zhang, Qihang and Li, Muchen and Wang, Ziao and Liao, Renjie and Wang, Lele},
+  booktitle={Findings of the Association for Computational Linguistics: EMNLP 2025},
+  pages={2076--2088},
+  year={2025},
+  publisher={Association for Computational Linguistics},
+  pdf={https://aclanthology.org/2025.findings-emnlp.110/},
+  blog={https://blog.qihang-zhang.com/2025/10/15/weighted-product-of-experts},
+  code={https://github.com/DSL-Lab/Weighted-Product-of-Experts},
+ selected={true}
+}
+
@inproceedings{gao2025neural,
abbr={NeurIPS},
bibtex_show={true},
diff --git a/_posts/2025-10-11-max-ent-rl.md b/_posts/2025-10-11-max-ent-rl.md
index d6a6567..2ef1ee1 100644
--- a/_posts/2025-10-11-max-ent-rl.md
+++ b/_posts/2025-10-11-max-ent-rl.md
@@ -1,12 +1,12 @@
---
layout: distill
-title: Why the Exponential? From Max‑Entropy RL to the Boltzmann Distribution
+title: Why the Exponential? From Max‑Entropy RL to the Boltzmann Distribution
description: This blog post explores why the exponential function appears ubiquitously across modern RL, energy-based modeling, and statistical mechanics. We examine the connection between max-entropy reinforcement learning and the Boltzmann distribution, uncovering the fundamental principles that make the exponential form inevitable and explaining what "temperature" actually does in these frameworks.
tags: reinforcement-learning information-theory boltzmann-distribution
giscus_comments: true
date: 2025-10-11
featured: true
-redirect: https://qihang-zhang.com/Learning-Sys-Blog/2025/10/06/max-ent-rl-and-boltzmann-distribution.html
+redirect: https://blog.qihang-zhang.com/2025/10/06/max-ent-rl-and-boltzmann-distribution.html
authors:
- name: Qihang Zhang
@@ -15,3 +15,10 @@ authors:
name: UBC
---
+
+
+
+If you are not redirected automatically, you can read the full post here:
+[Why the Exponential? From Max‑Entropy RL to the Boltzmann Distribution](https://blog.qihang-zhang.com/2025/10/06/max-ent-rl-and-boltzmann-distribution.html).
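+
+As a one-line teaser (a standard result, sketched here rather than taken from the post's full derivation): maximizing policy entropy subject to a constraint on expected return forces the optimal policy into Boltzmann form,
+
+$$
+\pi^*(a \mid s) \;=\; \frac{\exp\big(Q(s,a)/\tau\big)}{\sum_{a'} \exp\big(Q(s,a')/\tau\big)},
+$$
+
+where $Q(s,a)$ is the (soft) action value and the temperature $\tau$ controls the trade-off between reward and entropy. The full post unpacks why this exponential form is inevitable.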
diff --git a/_posts/2025-11-09-weighted-poe.md b/_posts/2025-11-09-weighted-poe.md
new file mode 100644
index 0000000..b956919
--- /dev/null
+++ b/_posts/2025-11-09-weighted-poe.md
@@ -0,0 +1,25 @@
+---
+layout: distill
+title: Test-Time Steering for Lossless Text Compression via Weighted Product of Experts
+description: >
+  When I was a child, I always wondered: if I keep compressing the same file, will it eventually shrink to nothing? Of course, the answer is no. Once a file is optimally compressed by a lossless compressor, compressing it again with the same method gives a file of exactly the same size. Today I know this follows from the fundamental limits of lossless compression in information theory. But what if we use multiple compressors instead of one? If we combine them, can each remove a different part of the data’s redundancy, and how should such a combination be designed? In this blog post, we discuss these questions and propose a method called Weighted Product of Experts.
+tags: large-language-models lossless-compression mixture-of-experts information-theory
+giscus_comments: true
+date: 2025-11-09
+featured: true
+redirect: https://blog.qihang-zhang.com/2025/10/15/weighted-product-of-experts.html
+
+authors:
+ - name: Qihang Zhang
+ url: "https://qihang-zhang.com/"
+ affiliations:
+ name: UBC
+
+---
+
+
+
+If you are not redirected automatically, you can read the full post here:
+[Test-Time Steering for Lossless Text Compression via Weighted Product of Experts](https://blog.qihang-zhang.com/2025/10/15/weighted-product-of-experts.html).
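+
+As a rough sketch of the core idea (the notation here is mine; the paper's exact weighting scheme may differ): a weighted product of experts combines $K$ expert next-token distributions geometrically,
+
+$$
+p_{\mathrm{WPoE}}(x_t \mid x_{<t}) \;\propto\; \prod_{k=1}^{K} p_k(x_t \mid x_{<t})^{w_k}, \qquad w_k \ge 0,
+$$
+
+so each expert can veto tokens it considers unlikely, and the combined distribution can drive a standard arithmetic coder for lossless compression. How the weights $w_k$ are steered at test time is the subject of the paper; see the full post for details.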