43 Commits

Author SHA1 Message Date
Toby
bf2988116a Merge pull request #109 from apernet/wip-io-rst
feat: io tcp reset support (forward only)
2024-03-21 18:43:43 -07:00
Haruue
ef1416274d refactor(io): nft/ipt generator func 2024-03-22 02:04:25 +08:00
Toby
57c818038c feat: io tcp reset support (forward only) 2024-03-20 19:01:26 -07:00
Toby
6ad7714c9a Merge pull request #108 from apernet/fix-trojan-doc
docs: remove trojan-killer ref
2024-03-20 18:41:45 -07:00
Toby
ff9c4ccf79 docs: remove trojan-killer ref 2024-03-20 18:40:49 -07:00
Toby
e1d9406fdb Merge pull request #107 from apernet/fix-trojan
fix: trojan analyzer heuristics
2024-03-20 18:39:05 -07:00
Toby
b8e5079b8a fix: trojan analyzer heuristics 2024-03-20 18:37:43 -07:00
Toby
f3b72895ad Merge pull request #106 from apernet/wip-new-trojan
feat: new heuristics for trojan analyzer
2024-03-20 18:11:09 -07:00
Toby
0732dfa7a5 docs: no longer use trojan-killer 2024-03-20 18:10:22 -07:00
Toby
9d96acd8db feat: new heuristics for trojan analyzer 2024-03-20 18:07:26 -07:00
Toby
d1775184ce Merge pull request #102 from macie/badge_fix
docs: Fix status badge URL
2024-03-14 00:37:37 -07:00
macie
05d56616fc ci: Add readable name for workflow
It is visible on Workflows list inside GH Actions tab.
2024-03-14 08:14:30 +01:00
macie
ede70e1a87 docs: Fix badge URL 2024-03-14 08:06:27 +01:00
Toby
920783bd65 Merge pull request #98 from macie/ci
CI/CD: New workflow for testing and static analysis
2024-03-13 19:29:36 -07:00
macie
3a45461c19 test: Remove broken integration test
This tests fails when there are no specific binary files, but there is
no method to get the files during test.

Marking the tests as a skipped will gives us a false sense of security. So it's
better to remove it at all.
2024-03-12 12:15:10 +01:00
macie
3022bde81b fix: Linter errors
Fixed:
- "ineffective break statement. Did you mean to break out of the outer loop?" (SA4011)
- "channels used with os/signal.Notify should be buffered" (SA1017)
- "os.Kill cannot be trapped (did you mean syscall.SIGTERM?)" (SA1016)
- "func envOrDefaultBool is unused" (U1000)
- "should use time.Since instead of time.Now().Sub" (S1012)
2024-03-12 12:14:58 +01:00
macie
d98136bac7 ci: Add quality check
Runs tests and linters after each commit.
2024-03-11 22:19:26 +01:00
macie
c0e2483f6c test: Add basic tests for packet parsing
Tests performed on real-like packets secure expected behavior.
2024-03-11 21:25:34 +01:00
macie
3bd02ed46e refactor: Improve parsing docs
Reveal intentions by:
- extracting magic numbers into constants
- changing function names with >1 responsibilities
- documenting non-obvious behaviors.
2024-03-11 20:35:01 +01:00
Toby
4257788f33 Merge pull request #92 from apernet/wip-doc-openwrt-ipt
docs: instruction for OpenWrt 22.02 or earlier
2024-03-09 09:23:53 -08:00
Haruue
e77c2fabea docs: instruction for OpenWrt 22.02 or earlier 2024-03-09 21:42:49 +08:00
Toby
1dce82745d Merge pull request #85 from apernet/wip-buf
feat: netlink rcv/snd buffer config options
2024-02-29 11:23:08 -08:00
Toby
50cc94889f feat: netlink rcv/snd buffer config options 2024-02-28 17:45:24 -08:00
Toby
5d2d874089 Merge pull request #82 from apernet/update-fet
feat: update FET analyzer to better reflect what's described in the paper
2024-02-26 15:28:33 -08:00
Toby
797dce3dc2 feat: update FET analyzer to better reflect what's described in the paper 2024-02-26 15:27:35 -08:00
Toby
420286a46c Merge pull request #81 from apernet/update-gfwreport
chore: update gfw report links
2024-02-26 15:17:33 -08:00
Toby
531a7b0ceb chore: update gfw report links 2024-02-26 15:17:07 -08:00
Toby
20e0637756 Merge pull request #79 from apernet/update-ci
fix: release workflow
2024-02-26 10:50:44 -08:00
Toby
74dcc92fc6 fix: release workflow 2024-02-26 10:49:19 -08:00
Toby
b780ff65a4 Merge pull request #76 from apernet/fix-enobufs
fix: engine exit with "netlink receive: recvmsg: no buffer space available" when too many packets hit NFQUEUE
2024-02-26 10:40:08 -08:00
Haruue
8bd34d7798 chore: go mod tidy 2024-02-26 16:48:39 +08:00
Haruue
bed34f94be fix: engine exit when too many packets hit NFQUEUE 2024-02-26 16:46:50 +08:00
Toby
bc2e21e35d Merge pull request #75 from apernet/fix-missing-verdict
fix: verdict is missing for multicast packets
2024-02-26 00:12:42 -08:00
Haruue
a0b994ce22 fix: verdict is missing for multicast packets 2024-02-26 15:45:07 +08:00
Toby
8b07826de6 Merge pull request #71 from apernet/wip-log
feat: logging support in ruleset
2024-02-23 19:51:18 -08:00
Toby
aa6484dfa8 Merge pull request #73 from apernet/wip-readme2
docs: update README
2024-02-23 18:31:30 -08:00
Toby
29adf99dc1 docs: update README 2024-02-23 18:31:16 -08:00
Toby
71c739c18f Merge pull request #72 from apernet/update-readme
chore: better README
2024-02-23 15:43:25 -08:00
Toby
182a6cf878 chore: better README 2024-02-23 15:43:08 -08:00
Toby
ed9e380a57 fix: variable support & update example in doc 2024-02-23 14:37:05 -08:00
Toby
7353a16358 feat: logging support in ruleset 2024-02-23 14:13:35 -08:00
Toby
465373eaf1 Merge pull request #66 from apernet/readme-tg
docs: add telegram group link
2024-02-17 22:34:56 -08:00
Toby
f598cb572d docs: add telegram group link 2024-02-17 22:34:38 -08:00
25 changed files with 725 additions and 305 deletions

47
.github/workflows/check.yaml vendored Normal file
View File

@@ -0,0 +1,47 @@
name: Quality check
on:
push:
branches:
- "*"
pull_request:
permissions:
contents: read
jobs:
static-analysis:
name: Static analysis
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: 'stable'
- run: go vet ./...
- name: staticcheck
uses: dominikh/staticcheck-action@v1.3.0
with:
install-go: false
tests:
name: Tests
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: 'stable'
- run: go test ./...

View File

@@ -1,6 +1,7 @@
name: Release
on: on:
release: release:
types: [ created ] types: [published]
permissions: permissions:
contents: write contents: write
@@ -12,8 +13,8 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
strategy: strategy:
matrix: matrix:
goos: [ linux ] goos: [linux]
goarch: [ "386", amd64, arm64 ] goarch: ["386", amd64, arm64]
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
- uses: wangyoucao577/go-release-action@v1 - uses: wangyoucao577/go-release-action@v1
@@ -21,6 +22,6 @@ jobs:
github_token: ${{ secrets.GITHUB_TOKEN }} github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }} goos: ${{ matrix.goos }}
goarch: ${{ matrix.goarch }} goarch: ${{ matrix.goarch }}
goversion: "https://go.dev/dl/go1.21.6.linux-amd64.tar.gz" goversion: "https://go.dev/dl/go1.22.0.linux-amd64.tar.gz"
binary_name: "OpenGFW" binary_name: "OpenGFW"
extra_files: LICENSE README.md README.zh.md extra_files: LICENSE README.md README.zh.md

View File

@@ -1,11 +1,14 @@
# ![OpenGFW](docs/logo.png) # ![OpenGFW](docs/logo.png)
[![Quality check status](https://github.com/apernet/OpenGFW/actions/workflows/check.yaml/badge.svg)](https://github.com/apernet/OpenGFW/actions/workflows/check.yaml)
[![License][1]][2] [![License][1]][2]
[1]: https://img.shields.io/badge/License-MPL_2.0-brightgreen.svg [1]: https://img.shields.io/badge/License-MPL_2.0-brightgreen.svg
[2]: LICENSE [2]: LICENSE
OpenGFW は、Linux 上の [GFW](https://en.wikipedia.org/wiki/Great_Firewall) の柔軟で使いやすいオープンソース実装であり、多くの点で本物より強力です。これは家庭用ルーターでできるサイバー主権です。 OpenGFW は、あなた専用の DIY 中国のグレートファイアウォール (https://en.wikipedia.org/wiki/Great_Firewall) です。Linux 上で利用可能な柔軟で使いやすいオープンソースプログラムとして提供されています。なぜ権力者だけが楽しむのでしょうか?権力を人々に与え、検閲を民主化する時が来ました。自宅のルーターにサイバー主権のスリルをもたらし、プロのようにフィルタリングを始めましょう - あなたもビッグブラザーになることができます。
Telegram グループ: https://t.me/OpGFW
> [!CAUTION] > [!CAUTION]
> このプロジェクトはまだ開発の初期段階です。使用は自己責任でお願いします。 > このプロジェクトはまだ開発の初期段階です。使用は自己責任でお願いします。
@@ -17,8 +20,8 @@ OpenGFW は、Linux 上の [GFW](https://en.wikipedia.org/wiki/Great_Firewall)
- フル IP/TCP 再アセンブル、各種プロトコルアナライザー - フル IP/TCP 再アセンブル、各種プロトコルアナライザー
- HTTP、TLS、QUIC、DNS、SSH、SOCKS4/5、WireGuard、その他多数 - HTTP、TLS、QUIC、DNS、SSH、SOCKS4/5、WireGuard、その他多数
- Shadowsocks の「完全に暗号化されたトラフィック」の検出など (https://gfw.report/publications/usenixsecurity23/data/paper/paper.pdf) - Shadowsocks の「完全に暗号化されたトラフィック」の検出など (https://gfw.report/publications/usenixsecurity23/en/)
- トロイの木馬キラー (https://github.com/XTLS/Trojan-killer) に基づくトロイの木馬 (プロキシプロトコル) 検出 - Trojan プロキシプロトコル検出
- [WIP] 機械学習に基づくトラフィック分類 - [WIP] 機械学習に基づくトラフィック分類
- IPv4 と IPv6 をフルサポート - IPv4 と IPv6 をフルサポート
- フローベースのマルチコア負荷分散 - フローベースのマルチコア負荷分散
@@ -36,6 +39,7 @@ OpenGFW は、Linux 上の [GFW](https://en.wikipedia.org/wiki/Great_Firewall)
- マルウェア対策 - マルウェア対策
- VPN/プロキシサービスの不正利用防止 - VPN/プロキシサービスの不正利用防止
- トラフィック分析(ログのみモード) - トラフィック分析(ログのみモード)
- 独裁的な野心を実現するのを助ける
## 使用方法 ## 使用方法
@@ -59,7 +63,11 @@ OpenGFW は OpenWrt 23.05 で動作することがテストされています(
依存関係をインストールしてください: 依存関係をインストールしてください:
```shell ```shell
# バージョン22.03以降(nftables をベースとしたファイアウォール)の場合
opkg install kmod-nft-queue kmod-nf-conntrack-netlink opkg install kmod-nft-queue kmod-nf-conntrack-netlink
# バージョン22.03以前(22.03を除く、iptablesをベースとしたファイアウォール)の場合
opkg install kmod-ipt-nfqueue iptables-mod-nfqueue kmod-nf-conntrack-netlink
``` ```
### 設定例 ### 設定例
@@ -67,7 +75,10 @@ opkg install kmod-nft-queue kmod-nf-conntrack-netlink
```yaml ```yaml
io: io:
queueSize: 1024 queueSize: 1024
rcvBuf: 4194304
sndBuf: 4194304
local: true # FORWARD チェーンで OpenGFW を実行したい場合は false に設定する local: true # FORWARD チェーンで OpenGFW を実行したい場合は false に設定する
rst: false # ブロックされたTCP接続に対してRSTを送信する場合はtrueに設定してください。local=falseのみです
workers: workers:
count: 4 count: 4
@@ -90,6 +101,11 @@ workers:
式言語の構文については、[Expr 言語定義](https://expr-lang.org/docs/language-definition)を参照してください。 式言語の構文については、[Expr 言語定義](https://expr-lang.org/docs/language-definition)を参照してください。
```yaml ```yaml
# ルールは、"action" または "log" の少なくとも一方が設定されていなければなりません。
- name: log horny people
log: true
expr: let sni = string(tls?.req?.sni); sni contains "porn" || sni contains "hentai"
- name: block v2ex http - name: block v2ex http
action: block action: block
expr: string(http?.req?.headers?.host) endsWith "v2ex.com" expr: string(http?.req?.headers?.host) endsWith "v2ex.com"
@@ -102,8 +118,9 @@ workers:
action: block action: block
expr: string(quic?.req?.sni) endsWith "v2ex.com" expr: string(quic?.req?.sni) endsWith "v2ex.com"
- name: block shadowsocks - name: block and log shadowsocks
action: block action: block
log: true
expr: fet != nil && fet.yes expr: fet != nil && fet.yes
- name: block trojan - name: block trojan

View File

@@ -1,5 +1,6 @@
# ![OpenGFW](docs/logo.png) # ![OpenGFW](docs/logo.png)
[![Quality check status](https://github.com/apernet/OpenGFW/actions/workflows/check.yaml/badge.svg)](https://github.com/apernet/OpenGFW/actions/workflows/check.yaml)
[![License][1]][2] [![License][1]][2]
[1]: https://img.shields.io/badge/License-MPL_2.0-brightgreen.svg [1]: https://img.shields.io/badge/License-MPL_2.0-brightgreen.svg
@@ -8,8 +9,9 @@
**[中文文档](README.zh.md)** **[中文文档](README.zh.md)**
**[日本語ドキュメント](README.ja.md)** **[日本語ドキュメント](README.ja.md)**
OpenGFW is a flexible, easy-to-use, open source implementation of [GFW](https://en.wikipedia.org/wiki/Great_Firewall) on OpenGFW is your very own DIY Great Firewall of China (https://en.wikipedia.org/wiki/Great_Firewall), available as a flexible, easy-to-use open source program on Linux. Why let the powers that be have all the fun? It's time to give power to the people and democratize censorship. Bring the thrill of cyber-sovereignty right into your home router and start filtering like a pro - you too can play Big Brother.
Linux that's in many ways more powerful than the real thing. It's cyber sovereignty you can have on a home router.
Telegram group: https://t.me/OpGFW
> [!CAUTION] > [!CAUTION]
> This project is still in very early stages of development. Use at your own risk. > This project is still in very early stages of development. Use at your own risk.
@@ -22,8 +24,8 @@ Linux that's in many ways more powerful than the real thing. It's cyber sovereig
- Full IP/TCP reassembly, various protocol analyzers - Full IP/TCP reassembly, various protocol analyzers
- HTTP, TLS, QUIC, DNS, SSH, SOCKS4/5, WireGuard, and many more to come - HTTP, TLS, QUIC, DNS, SSH, SOCKS4/5, WireGuard, and many more to come
- "Fully encrypted traffic" detection for Shadowsocks, - "Fully encrypted traffic" detection for Shadowsocks,
etc. (https://gfw.report/publications/usenixsecurity23/data/paper/paper.pdf) etc. (https://gfw.report/publications/usenixsecurity23/en/)
- Trojan (proxy protocol) detection based on Trojan-killer (https://github.com/XTLS/Trojan-killer) - Trojan (proxy protocol) detection
- [WIP] Machine learning based traffic classification - [WIP] Machine learning based traffic classification
- Full IPv4 and IPv6 support - Full IPv4 and IPv6 support
- Flow-based multicore load balancing - Flow-based multicore load balancing
@@ -41,6 +43,7 @@ Linux that's in many ways more powerful than the real thing. It's cyber sovereig
- Malware protection - Malware protection
- Abuse prevention for VPN/proxy services - Abuse prevention for VPN/proxy services
- Traffic analysis (log only mode) - Traffic analysis (log only mode)
- Help you fulfill your dictatorial ambitions
## Usage ## Usage
@@ -64,7 +67,11 @@ OpenGFW has been tested to work on OpenWrt 23.05 (other versions should also wor
Install the dependencies: Install the dependencies:
```shell ```shell
# For OpenWrt version 22.03 and later (nftables based firewall)
opkg install kmod-nft-queue kmod-nf-conntrack-netlink opkg install kmod-nft-queue kmod-nf-conntrack-netlink
# For OpenWrt versions prior to 22.03 (excluding 22.03, iptables based firewall)
opkg install kmod-ipt-nfqueue iptables-mod-nfqueue kmod-nf-conntrack-netlink
``` ```
### Example config ### Example config
@@ -72,7 +79,10 @@ opkg install kmod-nft-queue kmod-nf-conntrack-netlink
```yaml ```yaml
io: io:
queueSize: 1024 queueSize: 1024
rcvBuf: 4194304
sndBuf: 4194304
local: true # set to false if you want to run OpenGFW on FORWARD chain local: true # set to false if you want to run OpenGFW on FORWARD chain
rst: false # set to true if you want to send RST for blocked TCP connections, local=false only
workers: workers:
count: 4 count: 4
@@ -96,6 +106,11 @@ For syntax of the expression language, please refer
to [Expr Language Definition](https://expr-lang.org/docs/language-definition). to [Expr Language Definition](https://expr-lang.org/docs/language-definition).
```yaml ```yaml
# A rule must have at least one of "action" or "log" field set.
- name: log horny people
log: true
expr: let sni = string(tls?.req?.sni); sni contains "porn" || sni contains "hentai"
- name: block v2ex http - name: block v2ex http
action: block action: block
expr: string(http?.req?.headers?.host) endsWith "v2ex.com" expr: string(http?.req?.headers?.host) endsWith "v2ex.com"
@@ -108,8 +123,9 @@ to [Expr Language Definition](https://expr-lang.org/docs/language-definition).
action: block action: block
expr: string(quic?.req?.sni) endsWith "v2ex.com" expr: string(quic?.req?.sni) endsWith "v2ex.com"
- name: block shadowsocks - name: block and log shadowsocks
action: block action: block
log: true
expr: fet != nil && fet.yes expr: fet != nil && fet.yes
- name: block trojan - name: block trojan

View File

@@ -1,12 +1,14 @@
# ![OpenGFW](docs/logo.png) # ![OpenGFW](docs/logo.png)
[![Quality check status](https://github.com/apernet/OpenGFW/actions/workflows/check.yaml/badge.svg)](https://github.com/apernet/OpenGFW/actions/workflows/check.yaml)
[![License][1]][2] [![License][1]][2]
[1]: https://img.shields.io/badge/License-MPL_2.0-brightgreen.svg [1]: https://img.shields.io/badge/License-MPL_2.0-brightgreen.svg
[2]: LICENSE [2]: LICENSE
OpenGFW 是一个 Linux 上灵活、易用、开源的 [GFW](https://zh.wikipedia.org/wiki/%E9%98%B2%E7%81%AB%E9%95%BF%E5%9F%8E) OpenGFW 是一个 Linux 上灵活、易用、开源的 DIY [GFW](https://zh.wikipedia.org/wiki/%E9%98%B2%E7%81%AB%E9%95%BF%E5%9F%8E) 实现,并且在许多方面比真正的 GFW 更强大。为何让那些掌权者独享乐趣?是时候把权力归还给人民,人人有墙建了。立即安装可以部署在家用路由器上的网络主权 - 你也能是老大哥。
实现,并且在许多方面比真正的 GFW 更强大。可以部署在家用路由器上的网络主权。
Telegram 群组: https://t.me/OpGFW
> [!CAUTION] > [!CAUTION]
> 本项目仍处于早期开发阶段。测试时自行承担风险。 > 本项目仍处于早期开发阶段。测试时自行承担风险。
@@ -18,8 +20,8 @@ OpenGFW 是一个 Linux 上灵活、易用、开源的 [GFW](https://zh.wikipedi
- 完整的 IP/TCP 重组,各种协议解析器 - 完整的 IP/TCP 重组,各种协议解析器
- HTTP, TLS, QUIC, DNS, SSH, SOCKS4/5, WireGuard, 更多协议正在开发中 - HTTP, TLS, QUIC, DNS, SSH, SOCKS4/5, WireGuard, 更多协议正在开发中
- Shadowsocks 等 "全加密流量" 检测 (https://gfw.report/publications/usenixsecurity23/data/paper/paper.pdf) - Shadowsocks 等 "全加密流量" 检测 (https://gfw.report/publications/usenixsecurity23/zh/)
- 基于 Trojan-killer 的 Trojan 检测 (https://github.com/XTLS/Trojan-killer) - Trojan 协议检测
- [开发中] 基于机器学习的流量分类 - [开发中] 基于机器学习的流量分类
- 同等支持 IPv4 和 IPv6 - 同等支持 IPv4 和 IPv6
- 基于流的多核负载均衡 - 基于流的多核负载均衡
@@ -37,6 +39,7 @@ OpenGFW 是一个 Linux 上灵活、易用、开源的 [GFW](https://zh.wikipedi
- 恶意软件防护 - 恶意软件防护
- VPN/代理服务滥用防护 - VPN/代理服务滥用防护
- 流量分析 (纯日志模式) - 流量分析 (纯日志模式)
- 助你实现你的独裁野心
## 使用 ## 使用
@@ -60,7 +63,11 @@ OpenGFW 在 OpenWrt 23.05 上测试可用(其他版本应该也可以,暂时
安装依赖: 安装依赖:
```shell ```shell
# 对于 22.03 或者之后的版本(基于 nftables 的防火墙)
opkg install kmod-nft-queue kmod-nf-conntrack-netlink opkg install kmod-nft-queue kmod-nf-conntrack-netlink
# 对于 22.03 之前的版本(不包括 22.03,基于 iptables 的防火墙)
opkg install kmod-ipt-nfqueue iptables-mod-nfqueue kmod-nf-conntrack-netlink
``` ```
### 样例配置 ### 样例配置
@@ -68,7 +75,10 @@ opkg install kmod-nft-queue kmod-nf-conntrack-netlink
```yaml ```yaml
io: io:
queueSize: 1024 queueSize: 1024
rcvBuf: 4194304
sndBuf: 4194304
local: true # 如果需要在 FORWARD 链上运行 OpenGFW,请设置为 false local: true # 如果需要在 FORWARD 链上运行 OpenGFW,请设置为 false
rst: false # 是否对要阻断的 TCP 连接发送 RST。仅在 local=false 时有效
workers: workers:
count: 4 count: 4
@@ -91,6 +101,11 @@ workers:
规则的语法请参考 [Expr Language Definition](https://expr-lang.org/docs/language-definition)。 规则的语法请参考 [Expr Language Definition](https://expr-lang.org/docs/language-definition)。
```yaml ```yaml
# 每条规则必须至少包含 action 或 log 中的一个。
- name: log horny people
log: true
expr: let sni = string(tls?.req?.sni); sni contains "porn" || sni contains "hentai"
- name: block v2ex http - name: block v2ex http
action: block action: block
expr: string(http?.req?.headers?.host) endsWith "v2ex.com" expr: string(http?.req?.headers?.host) endsWith "v2ex.com"
@@ -103,8 +118,9 @@ workers:
action: block action: block
expr: string(quic?.req?.sni) endsWith "v2ex.com" expr: string(quic?.req?.sni) endsWith "v2ex.com"
- name: block shadowsocks - name: block and log shadowsocks
action: block action: block
log: true
expr: fet != nil && fet.yes expr: fet != nil && fet.yes
- name: block trojan - name: block trojan

View File

@@ -5,7 +5,26 @@ import (
"github.com/apernet/OpenGFW/analyzer/utils" "github.com/apernet/OpenGFW/analyzer/utils"
) )
func ParseTLSClientHello(chBuf *utils.ByteBuffer) analyzer.PropMap { // TLS record types.
const (
RecordTypeHandshake = 0x16
)
// TLS handshake message types.
const (
TypeClientHello = 0x01
TypeServerHello = 0x02
)
// TLS extension numbers.
const (
extServerName = 0x0000
extALPN = 0x0010
extSupportedVersions = 0x002b
extEncryptedClientHello = 0xfe0d
)
func ParseTLSClientHelloMsgData(chBuf *utils.ByteBuffer) analyzer.PropMap {
var ok bool var ok bool
m := make(analyzer.PropMap) m := make(analyzer.PropMap)
// Version, random & session ID length combined are within 35 bytes, // Version, random & session ID length combined are within 35 bytes,
@@ -76,7 +95,7 @@ func ParseTLSClientHello(chBuf *utils.ByteBuffer) analyzer.PropMap {
return m return m
} }
func ParseTLSServerHello(shBuf *utils.ByteBuffer) analyzer.PropMap { func ParseTLSServerHelloMsgData(shBuf *utils.ByteBuffer) analyzer.PropMap {
var ok bool var ok bool
m := make(analyzer.PropMap) m := make(analyzer.PropMap)
// Version, random & session ID length combined are within 35 bytes, // Version, random & session ID length combined are within 35 bytes,
@@ -133,7 +152,7 @@ func ParseTLSServerHello(shBuf *utils.ByteBuffer) analyzer.PropMap {
func parseTLSExtensions(extType uint16, extDataBuf *utils.ByteBuffer, m analyzer.PropMap) bool { func parseTLSExtensions(extType uint16, extDataBuf *utils.ByteBuffer, m analyzer.PropMap) bool {
switch extType { switch extType {
case 0x0000: // SNI case extServerName:
ok := extDataBuf.Skip(2) // Ignore list length, we only care about the first entry for now ok := extDataBuf.Skip(2) // Ignore list length, we only care about the first entry for now
if !ok { if !ok {
// Not enough data for list length // Not enough data for list length
@@ -154,7 +173,7 @@ func parseTLSExtensions(extType uint16, extDataBuf *utils.ByteBuffer, m analyzer
// Not enough data for SNI // Not enough data for SNI
return false return false
} }
case 0x0010: // ALPN case extALPN:
ok := extDataBuf.Skip(2) // Ignore list length, as we read until the end ok := extDataBuf.Skip(2) // Ignore list length, as we read until the end
if !ok { if !ok {
// Not enough data for list length // Not enough data for list length
@@ -175,7 +194,7 @@ func parseTLSExtensions(extType uint16, extDataBuf *utils.ByteBuffer, m analyzer
alpnList = append(alpnList, alpn) alpnList = append(alpnList, alpn)
} }
m["alpn"] = alpnList m["alpn"] = alpnList
case 0x002b: // Supported Versions case extSupportedVersions:
if extDataBuf.Len() == 2 { if extDataBuf.Len() == 2 {
// Server only selects one version // Server only selects one version
m["supported_versions"], _ = extDataBuf.GetUint16(false, true) m["supported_versions"], _ = extDataBuf.GetUint16(false, true)
@@ -197,7 +216,7 @@ func parseTLSExtensions(extType uint16, extDataBuf *utils.ByteBuffer, m analyzer
} }
m["supported_versions"] = versions m["supported_versions"] = versions
} }
case 0xfe0d: // ECH case extEncryptedClientHello:
// We can't parse ECH for now, just set a flag // We can't parse ECH for now, just set a flag
m["ech"] = true m["ech"] = true
} }

View File

@@ -143,8 +143,11 @@ func isTLSorHTTP(bytes []byte) bool {
if len(bytes) < 3 { if len(bytes) < 3 {
return false return false
} }
if bytes[0] == 0x16 && bytes[1] == 0x03 && bytes[2] <= 0x03 { // "We observe that the GFW exempts any connection whose first
// TLS handshake for TLS 1.0-1.3 // three bytes match the following regular expression:
// [\x16-\x17]\x03[\x00-\x09]" - from the paper in Section 4.3
if bytes[0] >= 0x16 && bytes[0] <= 0x17 &&
bytes[1] == 0x03 && bytes[2] <= 0x09 {
return true return true
} }
// HTTP request // HTTP request

64
analyzer/tcp/http_test.go Normal file
View File

@@ -0,0 +1,64 @@
package tcp
import (
"reflect"
"strings"
"testing"
"github.com/apernet/OpenGFW/analyzer"
)
func TestHTTPParsing_Request(t *testing.T) {
testCases := map[string]analyzer.PropMap{
"GET / HTTP/1.1\r\n": {
"method": "GET", "path": "/", "version": "HTTP/1.1",
},
"POST /hello?a=1&b=2 HTTP/1.0\r\n": {
"method": "POST", "path": "/hello?a=1&b=2", "version": "HTTP/1.0",
},
"PUT /world HTTP/1.1\r\nContent-Length: 4\r\n\r\nbody": {
"method": "PUT", "path": "/world", "version": "HTTP/1.1", "headers": analyzer.PropMap{"content-length": "4"},
},
"DELETE /goodbye HTTP/2.0\r\n": {
"method": "DELETE", "path": "/goodbye", "version": "HTTP/2.0",
},
}
for tc, want := range testCases {
t.Run(strings.Split(tc, " ")[0], func(t *testing.T) {
tc, want := tc, want
t.Parallel()
u, _ := newHTTPStream(nil).Feed(false, false, false, 0, []byte(tc))
got := u.M.Get("req")
if !reflect.DeepEqual(got, want) {
t.Errorf("\"%s\" parsed = %v, want %v", tc, got, want)
}
})
}
}
func TestHTTPParsing_Response(t *testing.T) {
testCases := map[string]analyzer.PropMap{
"HTTP/1.0 200 OK\r\nContent-Length: 4\r\n\r\nbody": {
"version": "HTTP/1.0", "status": 200,
"headers": analyzer.PropMap{"content-length": "4"},
},
"HTTP/2.0 204 No Content\r\n\r\n": {
"version": "HTTP/2.0", "status": 204,
},
}
for tc, want := range testCases {
t.Run(strings.Split(tc, " ")[0], func(t *testing.T) {
tc, want := tc, want
t.Parallel()
u, _ := newHTTPStream(nil).Feed(true, false, false, 0, []byte(tc))
got := u.M.Get("resp")
if !reflect.DeepEqual(got, want) {
t.Errorf("\"%s\" parsed = %v, want %v", tc, got, want)
}
})
}
}

View File

@@ -208,10 +208,10 @@ func (s *socksStream) parseSocks5ReqMethod() utils.LSMAction {
switch method { switch method {
case Socks5AuthNotRequired: case Socks5AuthNotRequired:
s.authReqMethod = Socks5AuthNotRequired s.authReqMethod = Socks5AuthNotRequired
break return utils.LSMActionNext
case Socks5AuthPassword: case Socks5AuthPassword:
s.authReqMethod = Socks5AuthPassword s.authReqMethod = Socks5AuthPassword
break return utils.LSMActionNext
default: default:
// TODO: more auth method to support // TODO: more auth method to support
} }

View File

@@ -44,12 +44,12 @@ type tlsStream struct {
func newTLSStream(logger analyzer.Logger) *tlsStream { func newTLSStream(logger analyzer.Logger) *tlsStream {
s := &tlsStream{logger: logger, reqBuf: &utils.ByteBuffer{}, respBuf: &utils.ByteBuffer{}} s := &tlsStream{logger: logger, reqBuf: &utils.ByteBuffer{}, respBuf: &utils.ByteBuffer{}}
s.reqLSM = utils.NewLinearStateMachine( s.reqLSM = utils.NewLinearStateMachine(
s.tlsClientHelloSanityCheck, s.tlsClientHelloPreprocess,
s.parseClientHello, s.parseClientHelloData,
) )
s.respLSM = utils.NewLinearStateMachine( s.respLSM = utils.NewLinearStateMachine(
s.tlsServerHelloSanityCheck, s.tlsServerHelloPreprocess,
s.parseServerHello, s.parseServerHelloData,
) )
return s return s
} }
@@ -89,61 +89,105 @@ func (s *tlsStream) Feed(rev, start, end bool, skip int, data []byte) (u *analyz
return update, cancelled || (s.reqDone && s.respDone) return update, cancelled || (s.reqDone && s.respDone)
} }
func (s *tlsStream) tlsClientHelloSanityCheck() utils.LSMAction { // tlsClientHelloPreprocess validates ClientHello message.
data, ok := s.reqBuf.Get(9, true) //
// During validation, message header and first handshake header may be removed
// from `s.reqBuf`.
func (s *tlsStream) tlsClientHelloPreprocess() utils.LSMAction {
// headers size: content type (1 byte) + legacy protocol version (2 bytes) +
// + content length (2 bytes) + message type (1 byte) +
// + handshake length (3 bytes)
const headersSize = 9
// minimal data size: protocol version (2 bytes) + random (32 bytes) +
// + session ID (1 byte) + cipher suites (4 bytes) +
// + compression methods (2 bytes) + no extensions
const minDataSize = 41
header, ok := s.reqBuf.Get(headersSize, true)
if !ok { if !ok {
// not a full header yet
return utils.LSMActionPause return utils.LSMActionPause
} }
if data[0] != 0x16 || data[5] != 0x01 {
// Not a TLS handshake, or not a client hello if header[0] != internal.RecordTypeHandshake || header[5] != internal.TypeClientHello {
return utils.LSMActionCancel return utils.LSMActionCancel
} }
s.clientHelloLen = int(data[6])<<16 | int(data[7])<<8 | int(data[8])
if s.clientHelloLen < 41 { s.clientHelloLen = int(header[6])<<16 | int(header[7])<<8 | int(header[8])
// 2 (Protocol Version) + if s.clientHelloLen < minDataSize {
// 32 (Random) +
// 1 (Session ID Length) +
// 2 (Cipher Suites Length) +
// 2 (Cipher Suite) +
// 1 (Compression Methods Length) +
// 1 (Compression Method) +
// No extensions
// This should be the bare minimum for a client hello
return utils.LSMActionCancel return utils.LSMActionCancel
} }
// TODO: something is missing. See:
// const messageHeaderSize = 4
// fullMessageLen := int(header[3])<<8 | int(header[4])
// msgNo := fullMessageLen / int(messageHeaderSize+s.serverHelloLen)
// if msgNo != 1 {
// // what here?
// }
// if messageNo != int(messageNo) {
// // what here?
// }
return utils.LSMActionNext return utils.LSMActionNext
} }
func (s *tlsStream) tlsServerHelloSanityCheck() utils.LSMAction { // tlsServerHelloPreprocess validates ServerHello message.
data, ok := s.respBuf.Get(9, true) //
// During validation, message header and first handshake header may be removed
// from `s.reqBuf`.
func (s *tlsStream) tlsServerHelloPreprocess() utils.LSMAction {
// header size: content type (1 byte) + legacy protocol version (2 byte) +
// + content length (2 byte) + message type (1 byte) +
// + handshake length (3 byte)
const headersSize = 9
// minimal data size: server version (2 byte) + random (32 byte) +
// + session ID (>=1 byte) + cipher suite (2 byte) +
// + compression method (1 byte) + no extensions
const minDataSize = 38
header, ok := s.respBuf.Get(headersSize, true)
if !ok { if !ok {
// not a full header yet
return utils.LSMActionPause return utils.LSMActionPause
} }
if data[0] != 0x16 || data[5] != 0x02 {
// Not a TLS handshake, or not a server hello if header[0] != internal.RecordTypeHandshake || header[5] != internal.TypeServerHello {
return utils.LSMActionCancel return utils.LSMActionCancel
} }
s.serverHelloLen = int(data[6])<<16 | int(data[7])<<8 | int(data[8])
if s.serverHelloLen < 38 { s.serverHelloLen = int(header[6])<<16 | int(header[7])<<8 | int(header[8])
// 2 (Protocol Version) + if s.serverHelloLen < minDataSize {
// 32 (Random) +
// 1 (Session ID Length) +
// 2 (Cipher Suite) +
// 1 (Compression Method) +
// No extensions
// This should be the bare minimum for a server hello
return utils.LSMActionCancel return utils.LSMActionCancel
} }
// TODO: something is missing. See example:
// const messageHeaderSize = 4
// fullMessageLen := int(header[3])<<8 | int(header[4])
// msgNo := fullMessageLen / int(messageHeaderSize+s.serverHelloLen)
// if msgNo != 1 {
// // what here?
// }
// if messageNo != int(messageNo) {
// // what here?
// }
return utils.LSMActionNext return utils.LSMActionNext
} }
func (s *tlsStream) parseClientHello() utils.LSMAction { // parseClientHelloData converts valid ClientHello message data (without
// headers) into `analyzer.PropMap`.
//
// Parsing error may leave `s.reqBuf` in an unusable state.
func (s *tlsStream) parseClientHelloData() utils.LSMAction {
chBuf, ok := s.reqBuf.GetSubBuffer(s.clientHelloLen, true) chBuf, ok := s.reqBuf.GetSubBuffer(s.clientHelloLen, true)
if !ok { if !ok {
// Not a full client hello yet // Not a full client hello yet
return utils.LSMActionPause return utils.LSMActionPause
} }
m := internal.ParseTLSClientHello(chBuf) m := internal.ParseTLSClientHelloMsgData(chBuf)
if m == nil { if m == nil {
return utils.LSMActionCancel return utils.LSMActionCancel
} else { } else {
@@ -153,13 +197,17 @@ func (s *tlsStream) parseClientHello() utils.LSMAction {
} }
} }
func (s *tlsStream) parseServerHello() utils.LSMAction { // parseServerHelloData converts valid ServerHello message data (without
// headers) into `analyzer.PropMap`.
//
// Parsing error may leave `s.respBuf` in an unusable state.
func (s *tlsStream) parseServerHelloData() utils.LSMAction {
shBuf, ok := s.respBuf.GetSubBuffer(s.serverHelloLen, true) shBuf, ok := s.respBuf.GetSubBuffer(s.serverHelloLen, true)
if !ok { if !ok {
// Not a full server hello yet // Not a full server hello yet
return utils.LSMActionPause return utils.LSMActionPause
} }
m := internal.ParseTLSServerHello(shBuf) m := internal.ParseTLSServerHelloMsgData(shBuf)
if m == nil { if m == nil {
return utils.LSMActionCancel return utils.LSMActionCancel
} else { } else {

69
analyzer/tcp/tls_test.go Normal file
View File

@@ -0,0 +1,69 @@
package tcp
import (
"reflect"
"testing"
"github.com/apernet/OpenGFW/analyzer"
)
func TestTlsStreamParsing_ClientHello(t *testing.T) {
// example packet taken from <https://tls12.xargs.org/#client-hello/annotated>
clientHello := []byte{
0x16, 0x03, 0x01, 0x00, 0xa5, 0x01, 0x00, 0x00, 0xa1, 0x03, 0x03, 0x00,
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c,
0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x00, 0x00, 0x20, 0xcc, 0xa8,
0xcc, 0xa9, 0xc0, 0x2f, 0xc0, 0x30, 0xc0, 0x2b, 0xc0, 0x2c, 0xc0, 0x13,
0xc0, 0x09, 0xc0, 0x14, 0xc0, 0x0a, 0x00, 0x9c, 0x00, 0x9d, 0x00, 0x2f,
0x00, 0x35, 0xc0, 0x12, 0x00, 0x0a, 0x01, 0x00, 0x00, 0x58, 0x00, 0x00,
0x00, 0x18, 0x00, 0x16, 0x00, 0x00, 0x13, 0x65, 0x78, 0x61, 0x6d, 0x70,
0x6c, 0x65, 0x2e, 0x75, 0x6c, 0x66, 0x68, 0x65, 0x69, 0x6d, 0x2e, 0x6e,
0x65, 0x74, 0x00, 0x05, 0x00, 0x05, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
0x0a, 0x00, 0x0a, 0x00, 0x08, 0x00, 0x1d, 0x00, 0x17, 0x00, 0x18, 0x00,
0x19, 0x00, 0x0b, 0x00, 0x02, 0x01, 0x00, 0x00, 0x0d, 0x00, 0x12, 0x00,
0x10, 0x04, 0x01, 0x04, 0x03, 0x05, 0x01, 0x05, 0x03, 0x06, 0x01, 0x06,
0x03, 0x02, 0x01, 0x02, 0x03, 0xff, 0x01, 0x00, 0x01, 0x00, 0x00, 0x12,
0x00, 0x00,
}
want := analyzer.PropMap{
"ciphers": []uint16{52392, 52393, 49199, 49200, 49195, 49196, 49171, 49161, 49172, 49162, 156, 157, 47, 53, 49170, 10},
"compression": []uint8{0},
"random": []uint8{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
"session": []uint8{},
"sni": "example.ulfheim.net",
"version": uint16(771),
}
s := newTLSStream(nil)
u, _ := s.Feed(false, false, false, 0, clientHello)
got := u.M.Get("req")
if !reflect.DeepEqual(got, want) {
t.Errorf("%d B parsed = %v, want %v", len(clientHello), got, want)
}
}
func TestTlsStreamParsing_ServerHello(t *testing.T) {
// example packet taken from <https://tls12.xargs.org/#server-hello/annotated>
serverHello := []byte{
0x16, 0x03, 0x03, 0x00, 0x31, 0x02, 0x00, 0x00, 0x2d, 0x03, 0x03, 0x70,
0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c,
0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88,
0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x00, 0xc0, 0x13, 0x00, 0x00,
0x05, 0xff, 0x01, 0x00, 0x01, 0x00,
}
want := analyzer.PropMap{
"cipher": uint16(49171),
"compression": uint8(0),
"random": []uint8{112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143},
"session": []uint8{},
"version": uint16(771),
}
s := newTLSStream(nil)
u, _ := s.Feed(true, false, false, 0, serverHello)
got := u.M.Get("resp")
if !reflect.DeepEqual(got, want) {
t.Errorf("%d B parsed = %v, want %v", len(serverHello), got, want)
}
}

View File

@@ -9,22 +9,14 @@ import (
var _ analyzer.TCPAnalyzer = (*TrojanAnalyzer)(nil) var _ analyzer.TCPAnalyzer = (*TrojanAnalyzer)(nil)
// CCS stands for "Change Cipher Spec" // CCS stands for "Change Cipher Spec"
var trojanCCS = []byte{20, 3, 3, 0, 1, 1} var ccsPattern = []byte{20, 3, 3, 0, 1, 1}
const ( // TrojanAnalyzer uses length-based heuristics to detect Trojan traffic based on
trojanUpLB = 650 // its "TLS-in-TLS" nature. The heuristics are trained using a decision tree with
trojanUpUB = 1000 // about 2000 samples. This is highly experimental and is known to have significant
trojanDownLB1 = 170 // false positives (about 9% false positives & 3% false negatives).
trojanDownUB1 = 180 // We do NOT recommend directly blocking all positive connections, as this is likely
trojanDownLB2 = 3000 // to break many normal TLS connections.
trojanDownUB2 = 7500
)
// TrojanAnalyzer uses a very simple packet length based check to determine
// if a TLS connection is actually the Trojan proxy protocol.
// The algorithm is from the following project, with small modifications:
// https://github.com/XTLS/Trojan-killer
// Warning: Experimental only. This method is known to have significant false positives and false negatives.
type TrojanAnalyzer struct{} type TrojanAnalyzer struct{}
func (a *TrojanAnalyzer) Name() string { func (a *TrojanAnalyzer) Name() string {
@@ -32,7 +24,7 @@ func (a *TrojanAnalyzer) Name() string {
} }
func (a *TrojanAnalyzer) Limit() int { func (a *TrojanAnalyzer) Limit() int {
return 16384 return 512000
} }
func (a *TrojanAnalyzer) NewTCP(info analyzer.TCPInfo, logger analyzer.Logger) analyzer.TCPStream { func (a *TrojanAnalyzer) NewTCP(info analyzer.TCPInfo, logger analyzer.Logger) analyzer.TCPStream {
@@ -40,10 +32,12 @@ func (a *TrojanAnalyzer) NewTCP(info analyzer.TCPInfo, logger analyzer.Logger) a
} }
type trojanStream struct { type trojanStream struct {
logger analyzer.Logger logger analyzer.Logger
active bool first bool
upCount int count bool
downCount int rev bool
seq [3]int
seqIndex int
} }
func newTrojanStream(logger analyzer.Logger) *trojanStream { func newTrojanStream(logger analyzer.Logger) *trojanStream {
@@ -57,33 +51,47 @@ func (s *trojanStream) Feed(rev, start, end bool, skip int, data []byte) (u *ana
if len(data) == 0 { if len(data) == 0 {
return nil, false return nil, false
} }
if !rev && !s.active && len(data) >= 6 && bytes.Equal(data[:6], trojanCCS) {
// Client CCS encountered, start counting if s.first {
s.active = true s.first = false
// Stop if it's not a valid TLS connection
if !(!rev && len(data) >= 3 && data[0] >= 0x16 && data[0] <= 0x17 &&
data[1] == 0x03 && data[2] <= 0x09) {
return nil, true
}
} }
if s.active {
if rev { if !rev && !s.count && len(data) >= 6 && bytes.Equal(data[:6], ccsPattern) {
// Down direction // Client Change Cipher Spec encountered, start counting
s.downCount += len(data) s.count = true
}
if s.count {
if rev == s.rev {
// Same direction as last time, just update the number
s.seq[s.seqIndex] += len(data)
} else { } else {
// Up direction // Different direction, bump the index
if s.upCount >= trojanUpLB && s.upCount <= trojanUpUB && s.seqIndex += 1
((s.downCount >= trojanDownLB1 && s.downCount <= trojanDownUB1) || if s.seqIndex == 3 {
(s.downCount >= trojanDownLB2 && s.downCount <= trojanDownUB2)) { // Time to evaluate
yes := s.seq[0] >= 180 &&
s.seq[1] <= 11000 &&
s.seq[2] >= 40
return &analyzer.PropUpdate{ return &analyzer.PropUpdate{
Type: analyzer.PropUpdateReplace, Type: analyzer.PropUpdateReplace,
M: analyzer.PropMap{ M: analyzer.PropMap{
"up": s.upCount, "seq": s.seq,
"down": s.downCount, "yes": yes,
"yes": true,
}, },
}, true }, true
} }
s.upCount += len(data) s.seq[s.seqIndex] += len(data)
s.rev = rev
} }
} }
// Give up when either direction is over the limit
return nil, s.upCount > trojanUpUB || s.downCount > trojanDownUB2 return nil, false
} }
func (s *trojanStream) Close(limited bool) *analyzer.PropUpdate { func (s *trojanStream) Close(limited bool) *analyzer.PropUpdate {

View File

@@ -36,41 +36,40 @@ type quicStream struct {
} }
func (s *quicStream) Feed(rev bool, data []byte) (u *analyzer.PropUpdate, done bool) { func (s *quicStream) Feed(rev bool, data []byte) (u *analyzer.PropUpdate, done bool) {
// minimal data size: protocol version (2 bytes) + random (32 bytes) +
// + session ID (1 byte) + cipher suites (4 bytes) +
// + compression methods (2 bytes) + no extensions
const minDataSize = 41
if rev { if rev {
// We don't support server direction for now // We don't support server direction for now
s.invalidCount++ s.invalidCount++
return nil, s.invalidCount >= quicInvalidCountThreshold return nil, s.invalidCount >= quicInvalidCountThreshold
} }
pl, err := quic.ReadCryptoPayload(data) pl, err := quic.ReadCryptoPayload(data)
if err != nil || len(pl) < 4 { if err != nil || len(pl) < 4 { // FIXME: isn't length checked inside quic.ReadCryptoPayload? Also, what about error handling?
s.invalidCount++ s.invalidCount++
return nil, s.invalidCount >= quicInvalidCountThreshold return nil, s.invalidCount >= quicInvalidCountThreshold
} }
// Should be a TLS client hello
if pl[0] != 0x01 { if pl[0] != internal.TypeClientHello {
// Not a client hello
s.invalidCount++ s.invalidCount++
return nil, s.invalidCount >= quicInvalidCountThreshold return nil, s.invalidCount >= quicInvalidCountThreshold
} }
chLen := int(pl[1])<<16 | int(pl[2])<<8 | int(pl[3]) chLen := int(pl[1])<<16 | int(pl[2])<<8 | int(pl[3])
if chLen < 41 { if chLen < minDataSize {
// 2 (Protocol Version) +
// 32 (Random) +
// 1 (Session ID Length) +
// 2 (Cipher Suites Length) +_ws.col.protocol == "TLSv1.3"
// 2 (Cipher Suite) +
// 1 (Compression Methods Length) +
// 1 (Compression Method) +
// No extensions
// This should be the bare minimum for a client hello
s.invalidCount++ s.invalidCount++
return nil, s.invalidCount >= quicInvalidCountThreshold return nil, s.invalidCount >= quicInvalidCountThreshold
} }
m := internal.ParseTLSClientHello(&utils.ByteBuffer{Buf: pl[4:]})
m := internal.ParseTLSClientHelloMsgData(&utils.ByteBuffer{Buf: pl[4:]})
if m == nil { if m == nil {
s.invalidCount++ s.invalidCount++
return nil, s.invalidCount >= quicInvalidCountThreshold return nil, s.invalidCount >= quicInvalidCountThreshold
} }
return &analyzer.PropUpdate{ return &analyzer.PropUpdate{
Type: analyzer.PropUpdateMerge, Type: analyzer.PropUpdateMerge,
M: analyzer.PropMap{"req": m}, M: analyzer.PropMap{"req": m},

58
analyzer/udp/quic_test.go Normal file
View File

@@ -0,0 +1,58 @@
package udp
import (
"reflect"
"testing"
"github.com/apernet/OpenGFW/analyzer"
)
func TestQuicStreamParsing_ClientHello(t *testing.T) {
// example packet taken from <https://quic.xargs.org/#client-initial-packet/annotated>
clientHello := make([]byte, 1200)
clientInitial := []byte{
0xcd, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
0x06, 0x07, 0x05, 0x63, 0x5f, 0x63, 0x69, 0x64, 0x00, 0x41, 0x03, 0x98,
0x1c, 0x36, 0xa7, 0xed, 0x78, 0x71, 0x6b, 0xe9, 0x71, 0x1b, 0xa4, 0x98,
0xb7, 0xed, 0x86, 0x84, 0x43, 0xbb, 0x2e, 0x0c, 0x51, 0x4d, 0x4d, 0x84,
0x8e, 0xad, 0xcc, 0x7a, 0x00, 0xd2, 0x5c, 0xe9, 0xf9, 0xaf, 0xa4, 0x83,
0x97, 0x80, 0x88, 0xde, 0x83, 0x6b, 0xe6, 0x8c, 0x0b, 0x32, 0xa2, 0x45,
0x95, 0xd7, 0x81, 0x3e, 0xa5, 0x41, 0x4a, 0x91, 0x99, 0x32, 0x9a, 0x6d,
0x9f, 0x7f, 0x76, 0x0d, 0xd8, 0xbb, 0x24, 0x9b, 0xf3, 0xf5, 0x3d, 0x9a,
0x77, 0xfb, 0xb7, 0xb3, 0x95, 0xb8, 0xd6, 0x6d, 0x78, 0x79, 0xa5, 0x1f,
0xe5, 0x9e, 0xf9, 0x60, 0x1f, 0x79, 0x99, 0x8e, 0xb3, 0x56, 0x8e, 0x1f,
0xdc, 0x78, 0x9f, 0x64, 0x0a, 0xca, 0xb3, 0x85, 0x8a, 0x82, 0xef, 0x29,
0x30, 0xfa, 0x5c, 0xe1, 0x4b, 0x5b, 0x9e, 0xa0, 0xbd, 0xb2, 0x9f, 0x45,
0x72, 0xda, 0x85, 0xaa, 0x3d, 0xef, 0x39, 0xb7, 0xef, 0xaf, 0xff, 0xa0,
0x74, 0xb9, 0x26, 0x70, 0x70, 0xd5, 0x0b, 0x5d, 0x07, 0x84, 0x2e, 0x49,
0xbb, 0xa3, 0xbc, 0x78, 0x7f, 0xf2, 0x95, 0xd6, 0xae, 0x3b, 0x51, 0x43,
0x05, 0xf1, 0x02, 0xaf, 0xe5, 0xa0, 0x47, 0xb3, 0xfb, 0x4c, 0x99, 0xeb,
0x92, 0xa2, 0x74, 0xd2, 0x44, 0xd6, 0x04, 0x92, 0xc0, 0xe2, 0xe6, 0xe2,
0x12, 0xce, 0xf0, 0xf9, 0xe3, 0xf6, 0x2e, 0xfd, 0x09, 0x55, 0xe7, 0x1c,
0x76, 0x8a, 0xa6, 0xbb, 0x3c, 0xd8, 0x0b, 0xbb, 0x37, 0x55, 0xc8, 0xb7,
0xeb, 0xee, 0x32, 0x71, 0x2f, 0x40, 0xf2, 0x24, 0x51, 0x19, 0x48, 0x70,
0x21, 0xb4, 0xb8, 0x4e, 0x15, 0x65, 0xe3, 0xca, 0x31, 0x96, 0x7a, 0xc8,
0x60, 0x4d, 0x40, 0x32, 0x17, 0x0d, 0xec, 0x28, 0x0a, 0xee, 0xfa, 0x09,
0x5d, 0x08, 0xb3, 0xb7, 0x24, 0x1e, 0xf6, 0x64, 0x6a, 0x6c, 0x86, 0xe5,
0xc6, 0x2c, 0xe0, 0x8b, 0xe0, 0x99,
}
copy(clientHello, clientInitial)
want := analyzer.PropMap{
"alpn": []string{"ping/1.0"},
"ciphers": []uint16{4865, 4866, 4867},
"compression": []uint8{0},
"random": []uint8{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
"session": []uint8{},
"sni": "example.ulfheim.net",
"supported_versions": []uint16{772},
"version": uint16(771),
}
s := quicStream{}
u, _ := s.Feed(false, clientHello)
got := u.M.Get("req")
if !reflect.DeepEqual(got, want) {
t.Errorf("%d B parsed = %v, want %v", len(clientHello), got, want)
}
}

View File

@@ -5,7 +5,6 @@ import (
"fmt" "fmt"
"os" "os"
"os/signal" "os/signal"
"strconv"
"strings" "strings"
"syscall" "syscall"
@@ -168,8 +167,11 @@ type cliConfig struct {
} }
type cliConfigIO struct { type cliConfigIO struct {
QueueSize uint32 `mapstructure:"queueSize"` QueueSize uint32 `mapstructure:"queueSize"`
Local bool `mapstructure:"local"` ReadBuffer int `mapstructure:"rcvBuf"`
WriteBuffer int `mapstructure:"sndBuf"`
Local bool `mapstructure:"local"`
RST bool `mapstructure:"rst"`
} }
type cliConfigWorkers struct { type cliConfigWorkers struct {
@@ -192,8 +194,11 @@ func (c *cliConfig) fillLogger(config *engine.Config) error {
func (c *cliConfig) fillIO(config *engine.Config) error { func (c *cliConfig) fillIO(config *engine.Config) error {
nfio, err := io.NewNFQueuePacketIO(io.NFQueuePacketIOConfig{ nfio, err := io.NewNFQueuePacketIO(io.NFQueuePacketIOConfig{
QueueSize: c.IO.QueueSize, QueueSize: c.IO.QueueSize,
Local: c.IO.Local, ReadBuffer: c.IO.ReadBuffer,
WriteBuffer: c.IO.WriteBuffer,
Local: c.IO.Local,
RST: c.IO.RST,
}) })
if err != nil { if err != nil {
return configError{Field: "io", Err: err} return configError{Field: "io", Err: err}
@@ -254,6 +259,7 @@ func runMain(cmd *cobra.Command, args []string) {
logger.Fatal("failed to load rules", zap.Error(err)) logger.Fatal("failed to load rules", zap.Error(err))
} }
rsConfig := &ruleset.BuiltinConfig{ rsConfig := &ruleset.BuiltinConfig{
Logger: &rulesetLogger{},
GeoSiteFilename: config.Ruleset.GeoSite, GeoSiteFilename: config.Ruleset.GeoSite,
GeoIpFilename: config.Ruleset.GeoIp, GeoIpFilename: config.Ruleset.GeoIp,
} }
@@ -273,15 +279,15 @@ func runMain(cmd *cobra.Command, args []string) {
ctx, cancelFunc := context.WithCancel(context.Background()) ctx, cancelFunc := context.WithCancel(context.Background())
go func() { go func() {
// Graceful shutdown // Graceful shutdown
shutdownChan := make(chan os.Signal) shutdownChan := make(chan os.Signal, 1)
signal.Notify(shutdownChan, os.Interrupt, os.Kill) signal.Notify(shutdownChan, os.Interrupt, syscall.SIGTERM)
<-shutdownChan <-shutdownChan
logger.Info("shutting down gracefully...") logger.Info("shutting down gracefully...")
cancelFunc() cancelFunc()
}() }()
go func() { go func() {
// Rule reload // Rule reload
reloadChan := make(chan os.Signal) reloadChan := make(chan os.Signal, 1)
signal.Notify(reloadChan, syscall.SIGHUP) signal.Notify(reloadChan, syscall.SIGHUP)
for { for {
<-reloadChan <-reloadChan
@@ -371,14 +377,6 @@ func (l *engineLogger) UDPStreamAction(info ruleset.StreamInfo, action ruleset.A
zap.Bool("noMatch", noMatch)) zap.Bool("noMatch", noMatch))
} }
func (l *engineLogger) MatchError(info ruleset.StreamInfo, err error) {
logger.Error("match error",
zap.Int64("id", info.ID),
zap.String("src", info.SrcString()),
zap.String("dst", info.DstString()),
zap.Error(err))
}
func (l *engineLogger) ModifyError(info ruleset.StreamInfo, err error) { func (l *engineLogger) ModifyError(info ruleset.StreamInfo, err error) {
logger.Error("modify error", logger.Error("modify error",
zap.Int64("id", info.ID), zap.Int64("id", info.ID),
@@ -408,17 +406,29 @@ func (l *engineLogger) AnalyzerErrorf(streamID int64, name string, format string
zap.String("msg", fmt.Sprintf(format, args...))) zap.String("msg", fmt.Sprintf(format, args...)))
} }
type rulesetLogger struct{}
func (l *rulesetLogger) Log(info ruleset.StreamInfo, name string) {
logger.Info("ruleset log",
zap.String("name", name),
zap.Int64("id", info.ID),
zap.String("src", info.SrcString()),
zap.String("dst", info.DstString()),
zap.Any("props", info.Props))
}
func (l *rulesetLogger) MatchError(info ruleset.StreamInfo, name string, err error) {
logger.Error("ruleset match error",
zap.String("name", name),
zap.Int64("id", info.ID),
zap.String("src", info.SrcString()),
zap.String("dst", info.DstString()),
zap.Error(err))
}
func envOrDefaultString(key, def string) string { func envOrDefaultString(key, def string) string {
if v := os.Getenv(key); v != "" { if v := os.Getenv(key); v != "" {
return v return v
} }
return def return def
} }
func envOrDefaultBool(key string, def bool) bool {
if v := os.Getenv(key); v != "" {
b, _ := strconv.ParseBool(v)
return b
}
return def
}

View File

@@ -246,13 +246,10 @@ Example for blocking QUIC connections to `quic.rocks`:
## Trojan (proxy protocol) ## Trojan (proxy protocol)
Check https://github.com/XTLS/Trojan-killer for more information.
```json ```json
{ {
"trojan": { "trojan": {
"down": 4712, "seq": [680, 4514, 293],
"up": 671,
"yes": true "yes": true
} }
} }

View File

@@ -41,7 +41,6 @@ type Logger interface {
UDPStreamPropUpdate(info ruleset.StreamInfo, close bool) UDPStreamPropUpdate(info ruleset.StreamInfo, close bool)
UDPStreamAction(info ruleset.StreamInfo, action ruleset.Action, noMatch bool) UDPStreamAction(info ruleset.StreamInfo, action ruleset.Action, noMatch bool)
MatchError(info ruleset.StreamInfo, err error)
ModifyError(info ruleset.StreamInfo, err error) ModifyError(info ruleset.StreamInfo, err error)
AnalyzerDebugf(streamID int64, name string, format string, args ...interface{}) AnalyzerDebugf(streamID int64, name string, format string, args ...interface{})

View File

@@ -148,10 +148,7 @@ func (s *tcpStream) ReassembledSG(sg reassembly.ScatterGather, ac reassembly.Ass
s.virgin = false s.virgin = false
s.logger.TCPStreamPropUpdate(s.info, false) s.logger.TCPStreamPropUpdate(s.info, false)
// Match properties against ruleset // Match properties against ruleset
result, err := s.ruleset.Match(s.info) result := s.ruleset.Match(s.info)
if err != nil {
s.logger.MatchError(s.info, err)
}
action := result.Action action := result.Action
if action != ruleset.ActionMaybe && action != ruleset.ActionModify { if action != ruleset.ActionMaybe && action != ruleset.ActionModify {
verdict := actionToTCPVerdict(action) verdict := actionToTCPVerdict(action)

View File

@@ -201,10 +201,7 @@ func (s *udpStream) Feed(udp *layers.UDP, rev bool, uc *udpContext) {
s.virgin = false s.virgin = false
s.logger.UDPStreamPropUpdate(s.info, false) s.logger.UDPStreamPropUpdate(s.info, false)
// Match properties against ruleset // Match properties against ruleset
result, err := s.ruleset.Match(s.info) result := s.ruleset.Match(s.info)
if err != nil {
s.logger.MatchError(s.info, err)
}
action := result.Action action := result.Action
if action == ruleset.ActionModify { if action == ruleset.ActionModify {
// Call the modifier instance // Call the modifier instance
@@ -214,6 +211,7 @@ func (s *udpStream) Feed(udp *layers.UDP, rev bool, uc *udpContext) {
s.logger.ModifyError(s.info, errInvalidModifier) s.logger.ModifyError(s.info, errInvalidModifier)
action = ruleset.ActionMaybe action = ruleset.ActionMaybe
} else { } else {
var err error
uc.Packet, err = udpMI.Process(udp.Payload) uc.Packet, err = udpMI.Process(udp.Payload)
if err != nil { if err != nil {
// Modifier error, fallback to maybe // Modifier error, fallback to maybe

5
go.mod
View File

@@ -13,15 +13,14 @@ require (
github.com/quic-go/quic-go v0.41.0 github.com/quic-go/quic-go v0.41.0
github.com/spf13/cobra v1.8.0 github.com/spf13/cobra v1.8.0
github.com/spf13/viper v1.18.2 github.com/spf13/viper v1.18.2
github.com/stretchr/testify v1.8.4
go.uber.org/zap v1.26.0 go.uber.org/zap v1.26.0
golang.org/x/crypto v0.19.0 golang.org/x/crypto v0.19.0
golang.org/x/sys v0.17.0
google.golang.org/protobuf v1.31.0 google.golang.org/protobuf v1.31.0
gopkg.in/yaml.v3 v3.0.1 gopkg.in/yaml.v3 v3.0.1
) )
require ( require (
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/google/go-cmp v0.5.9 // indirect github.com/google/go-cmp v0.5.9 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect
@@ -31,7 +30,6 @@ require (
github.com/mdlayher/socket v0.1.1 // indirect github.com/mdlayher/socket v0.1.1 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/pelletier/go-toml/v2 v2.1.0 // indirect github.com/pelletier/go-toml/v2 v2.1.0 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect github.com/sourcegraph/conc v0.3.0 // indirect
@@ -43,7 +41,6 @@ require (
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
golang.org/x/net v0.19.0 // indirect golang.org/x/net v0.19.0 // indirect
golang.org/x/sync v0.5.0 // indirect golang.org/x/sync v0.5.0 // indirect
golang.org/x/sys v0.17.0 // indirect
golang.org/x/text v0.14.0 // indirect golang.org/x/text v0.14.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect
) )

View File

@@ -12,6 +12,7 @@ import (
"github.com/coreos/go-iptables/iptables" "github.com/coreos/go-iptables/iptables"
"github.com/florianl/go-nfqueue" "github.com/florianl/go-nfqueue"
"github.com/mdlayher/netlink" "github.com/mdlayher/netlink"
"golang.org/x/sys/unix"
) )
const ( const (
@@ -26,59 +27,60 @@ const (
nftTable = "opengfw" nftTable = "opengfw"
) )
var nftRulesForward = fmt.Sprintf(` func generateNftRules(local, rst bool) (*nftTableSpec, error) {
define ACCEPT_CTMARK=%d if local && rst {
define DROP_CTMARK=%d return nil, errors.New("tcp rst is not supported in local mode")
define QUEUE_NUM=%d }
table := &nftTableSpec{
table %s %s { Family: nftFamily,
chain FORWARD { Table: nftTable,
type filter hook forward priority filter; policy accept; }
table.Defines = append(table.Defines, fmt.Sprintf("define ACCEPT_CTMARK=%d", nfqueueConnMarkAccept))
ct mark $ACCEPT_CTMARK counter accept table.Defines = append(table.Defines, fmt.Sprintf("define DROP_CTMARK=%d", nfqueueConnMarkDrop))
ct mark $DROP_CTMARK counter drop table.Defines = append(table.Defines, fmt.Sprintf("define QUEUE_NUM=%d", nfqueueNum))
counter queue num $QUEUE_NUM bypass if local {
} table.Chains = []nftChainSpec{
} {Chain: "INPUT", Header: "type filter hook input priority filter; policy accept;"},
`, nfqueueConnMarkAccept, nfqueueConnMarkDrop, nfqueueNum, nftFamily, nftTable) {Chain: "OUTPUT", Header: "type filter hook output priority filter; policy accept;"},
}
var nftRulesLocal = fmt.Sprintf(` } else {
define ACCEPT_CTMARK=%d table.Chains = []nftChainSpec{
define DROP_CTMARK=%d {Chain: "FORWARD", Header: "type filter hook forward priority filter; policy accept;"},
define QUEUE_NUM=%d }
}
table %s %s { for i := range table.Chains {
chain INPUT { c := &table.Chains[i]
type filter hook input priority filter; policy accept; c.Rules = append(c.Rules, "ct mark $ACCEPT_CTMARK counter accept")
if rst {
ct mark $ACCEPT_CTMARK counter accept c.Rules = append(c.Rules, "ip protocol tcp ct mark $DROP_CTMARK counter reject with tcp reset")
ct mark $DROP_CTMARK counter drop }
counter queue num $QUEUE_NUM bypass c.Rules = append(c.Rules, "ct mark $DROP_CTMARK counter drop")
} c.Rules = append(c.Rules, "counter queue num $QUEUE_NUM bypass")
chain OUTPUT { }
type filter hook output priority filter; policy accept; return table, nil
ct mark $ACCEPT_CTMARK counter accept
ct mark $DROP_CTMARK counter drop
counter queue num $QUEUE_NUM bypass
}
}
`, nfqueueConnMarkAccept, nfqueueConnMarkDrop, nfqueueNum, nftFamily, nftTable)
var iptRulesForward = []iptRule{
{"filter", "FORWARD", []string{"-m", "connmark", "--mark", strconv.Itoa(nfqueueConnMarkAccept), "-j", "ACCEPT"}},
{"filter", "FORWARD", []string{"-m", "connmark", "--mark", strconv.Itoa(nfqueueConnMarkDrop), "-j", "DROP"}},
{"filter", "FORWARD", []string{"-j", "NFQUEUE", "--queue-num", strconv.Itoa(nfqueueNum), "--queue-bypass"}},
} }
var iptRulesLocal = []iptRule{ func generateIptRules(local, rst bool) ([]iptRule, error) {
{"filter", "INPUT", []string{"-m", "connmark", "--mark", strconv.Itoa(nfqueueConnMarkAccept), "-j", "ACCEPT"}}, if local && rst {
{"filter", "INPUT", []string{"-m", "connmark", "--mark", strconv.Itoa(nfqueueConnMarkDrop), "-j", "DROP"}}, return nil, errors.New("tcp rst is not supported in local mode")
{"filter", "INPUT", []string{"-j", "NFQUEUE", "--queue-num", strconv.Itoa(nfqueueNum), "--queue-bypass"}}, }
var chains []string
if local {
chains = []string{"INPUT", "OUTPUT"}
} else {
chains = []string{"FORWARD"}
}
rules := make([]iptRule, 0, 4*len(chains))
for _, chain := range chains {
rules = append(rules, iptRule{"filter", chain, []string{"-m", "connmark", "--mark", strconv.Itoa(nfqueueConnMarkAccept), "-j", "ACCEPT"}})
if rst {
rules = append(rules, iptRule{"filter", chain, []string{"-p", "tcp", "-m", "connmark", "--mark", strconv.Itoa(nfqueueConnMarkDrop), "-j", "REJECT", "--reject-with", "tcp-reset"}})
}
rules = append(rules, iptRule{"filter", chain, []string{"-m", "connmark", "--mark", strconv.Itoa(nfqueueConnMarkDrop), "-j", "DROP"}})
rules = append(rules, iptRule{"filter", chain, []string{"-j", "NFQUEUE", "--queue-num", strconv.Itoa(nfqueueNum), "--queue-bypass"}})
}
{"filter", "OUTPUT", []string{"-m", "connmark", "--mark", strconv.Itoa(nfqueueConnMarkAccept), "-j", "ACCEPT"}}, return rules, nil
{"filter", "OUTPUT", []string{"-m", "connmark", "--mark", strconv.Itoa(nfqueueConnMarkDrop), "-j", "DROP"}},
{"filter", "OUTPUT", []string{"-j", "NFQUEUE", "--queue-num", strconv.Itoa(nfqueueNum), "--queue-bypass"}},
} }
var _ PacketIO = (*nfqueuePacketIO)(nil) var _ PacketIO = (*nfqueuePacketIO)(nil)
@@ -88,6 +90,7 @@ var errNotNFQueuePacket = errors.New("not an NFQueue packet")
type nfqueuePacketIO struct { type nfqueuePacketIO struct {
n *nfqueue.Nfqueue n *nfqueue.Nfqueue
local bool local bool
rst bool
rSet bool // whether the nftables/iptables rules have been set rSet bool // whether the nftables/iptables rules have been set
// iptables not nil = use iptables instead of nftables // iptables not nil = use iptables instead of nftables
@@ -96,8 +99,11 @@ type nfqueuePacketIO struct {
} }
type NFQueuePacketIOConfig struct { type NFQueuePacketIOConfig struct {
QueueSize uint32 QueueSize uint32
Local bool ReadBuffer int
WriteBuffer int
Local bool
RST bool
} }
func NewNFQueuePacketIO(config NFQueuePacketIOConfig) (PacketIO, error) { func NewNFQueuePacketIO(config NFQueuePacketIOConfig) (PacketIO, error) {
@@ -127,9 +133,24 @@ func NewNFQueuePacketIO(config NFQueuePacketIOConfig) (PacketIO, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
if config.ReadBuffer > 0 {
err = n.Con.SetReadBuffer(config.ReadBuffer)
if err != nil {
_ = n.Close()
return nil, err
}
}
if config.WriteBuffer > 0 {
err = n.Con.SetWriteBuffer(config.WriteBuffer)
if err != nil {
_ = n.Close()
return nil, err
}
}
return &nfqueuePacketIO{ return &nfqueuePacketIO{
n: n, n: n,
local: config.Local, local: config.Local,
rst: config.RST,
ipt4: ipt4, ipt4: ipt4,
ipt6: ipt6, ipt6: ipt6,
}, nil }, nil
@@ -138,9 +159,10 @@ func NewNFQueuePacketIO(config NFQueuePacketIOConfig) (PacketIO, error) {
func (n *nfqueuePacketIO) Register(ctx context.Context, cb PacketCallback) error { func (n *nfqueuePacketIO) Register(ctx context.Context, cb PacketCallback) error {
err := n.n.RegisterWithErrorFunc(ctx, err := n.n.RegisterWithErrorFunc(ctx,
func(a nfqueue.Attribute) int { func(a nfqueue.Attribute) int {
if a.PacketID == nil || a.Ct == nil || a.Payload == nil || len(*a.Payload) < 20 { if ok, verdict := n.packetAttributeSanityCheck(a); !ok {
// Invalid packet, ignore if a.PacketID != nil {
// 20 is the minimum possible size of an IP packet _ = n.n.SetVerdict(*a.PacketID, verdict)
}
return 0 return 0
} }
p := &nfqueuePacket{ p := &nfqueuePacket{
@@ -151,6 +173,12 @@ func (n *nfqueuePacketIO) Register(ctx context.Context, cb PacketCallback) error
return okBoolToInt(cb(p, nil)) return okBoolToInt(cb(p, nil))
}, },
func(e error) int { func(e error) int {
if opErr := (*netlink.OpError)(nil); errors.As(e, &opErr) {
if errors.Is(opErr.Err, unix.ENOBUFS) {
// Kernel buffer temporarily full, ignore
return 0
}
}
return okBoolToInt(cb(nil, e)) return okBoolToInt(cb(nil, e))
}) })
if err != nil { if err != nil {
@@ -158,9 +186,9 @@ func (n *nfqueuePacketIO) Register(ctx context.Context, cb PacketCallback) error
} }
if !n.rSet { if !n.rSet {
if n.ipt4 != nil { if n.ipt4 != nil {
err = n.setupIpt(n.local, false) err = n.setupIpt(n.local, n.rst, false)
} else { } else {
err = n.setupNft(n.local, false) err = n.setupNft(n.local, n.rst, false)
} }
if err != nil { if err != nil {
return err return err
@@ -170,6 +198,25 @@ func (n *nfqueuePacketIO) Register(ctx context.Context, cb PacketCallback) error
return nil return nil
} }
func (n *nfqueuePacketIO) packetAttributeSanityCheck(a nfqueue.Attribute) (ok bool, verdict int) {
if a.PacketID == nil {
// Re-inject to NFQUEUE is actually not possible in this condition
return false, -1
}
if a.Payload == nil || len(*a.Payload) < 20 {
// 20 is the minimum possible size of an IP packet
return false, nfqueue.NfDrop
}
if a.Ct == nil {
// Multicast packets may not have a conntrack, but only appear in local mode
if n.local {
return false, nfqueue.NfAccept
}
return false, nfqueue.NfDrop
}
return true, -1
}
func (n *nfqueuePacketIO) SetVerdict(p Packet, v Verdict, newPacket []byte) error { func (n *nfqueuePacketIO) SetVerdict(p Packet, v Verdict, newPacket []byte) error {
nP, ok := p.(*nfqueuePacket) nP, ok := p.(*nfqueuePacket)
if !ok { if !ok {
@@ -195,29 +242,27 @@ func (n *nfqueuePacketIO) SetVerdict(p Packet, v Verdict, newPacket []byte) erro
func (n *nfqueuePacketIO) Close() error { func (n *nfqueuePacketIO) Close() error {
if n.rSet { if n.rSet {
if n.ipt4 != nil { if n.ipt4 != nil {
_ = n.setupIpt(n.local, true) _ = n.setupIpt(n.local, n.rst, true)
} else { } else {
_ = n.setupNft(n.local, true) _ = n.setupNft(n.local, n.rst, true)
} }
n.rSet = false n.rSet = false
} }
return n.n.Close() return n.n.Close()
} }
func (n *nfqueuePacketIO) setupNft(local, remove bool) error { func (n *nfqueuePacketIO) setupNft(local, rst, remove bool) error {
var rules string rules, err := generateNftRules(local, rst)
if local { if err != nil {
rules = nftRulesLocal return err
} else {
rules = nftRulesForward
} }
var err error rulesText := rules.String()
if remove { if remove {
err = nftDelete(nftFamily, nftTable) err = nftDelete(nftFamily, nftTable)
} else { } else {
// Delete first to make sure no leftover rules // Delete first to make sure no leftover rules
_ = nftDelete(nftFamily, nftTable) _ = nftDelete(nftFamily, nftTable)
err = nftAdd(rules) err = nftAdd(rulesText)
} }
if err != nil { if err != nil {
return err return err
@@ -225,14 +270,11 @@ func (n *nfqueuePacketIO) setupNft(local, remove bool) error {
return nil return nil
} }
func (n *nfqueuePacketIO) setupIpt(local, remove bool) error { func (n *nfqueuePacketIO) setupIpt(local, rst, remove bool) error {
var rules []iptRule rules, err := generateIptRules(local, rst)
if local { if err != nil {
rules = iptRulesLocal return err
} else {
rules = iptRulesForward
} }
var err error
if remove { if remove {
err = iptsBatchDeleteIfExists([]*iptables.IPTables{n.ipt4, n.ipt6}, rules) err = iptsBatchDeleteIfExists([]*iptables.IPTables{n.ipt4, n.ipt6}, rules)
} else { } else {
@@ -287,6 +329,42 @@ func nftDelete(family, table string) error {
return cmd.Run() return cmd.Run()
} }
type nftTableSpec struct {
Defines []string
Family, Table string
Chains []nftChainSpec
}
func (t *nftTableSpec) String() string {
chains := make([]string, 0, len(t.Chains))
for _, c := range t.Chains {
chains = append(chains, c.String())
}
return fmt.Sprintf(`
%s
table %s %s {
%s
}
`, strings.Join(t.Defines, "\n"), t.Family, t.Table, strings.Join(chains, ""))
}
type nftChainSpec struct {
Chain string
Header string
Rules []string
}
func (c *nftChainSpec) String() string {
return fmt.Sprintf(`
chain %s {
%s
%s
}
`, c.Chain, c.Header, strings.Join(c.Rules, "\n\x20\x20\x20\x20"))
}
type iptRule struct { type iptRule struct {
Table, Chain string Table, Chain string
RuleSpec []string RuleSpec []string

View File

@@ -49,7 +49,7 @@ func (l *V2GeoLoader) shouldDownload(filename string) bool {
if os.IsNotExist(err) { if os.IsNotExist(err) {
return true return true
} }
dt := time.Now().Sub(info.ModTime()) dt := time.Since(info.ModTime())
if l.UpdateInterval == 0 { if l.UpdateInterval == 0 {
return dt > geoDefaultUpdateInterval return dt > geoDefaultUpdateInterval
} else { } else {

View File

@@ -1,54 +0,0 @@
package v2geo
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestLoadGeoIP verifies LoadGeoIP against a pinned copy of geoip.dat,
// asserting exact entry counts and a few known CIDR entries.
func TestLoadGeoIP(t *testing.T) {
	m, err := LoadGeoIP("geoip.dat")
	// Bail out early on load failure: the checks below dereference
	// entries of m and would panic instead of failing cleanly.
	if !assert.NoError(t, err) {
		return
	}
	// Exact checks since we know the data.
	assert.Len(t, m, 252)
	// assert.Equal takes (t, expected, actual); keeping that order
	// makes failure messages label the values correctly.
	assert.Equal(t, "CN", m["cn"].CountryCode)
	assert.Len(t, m["cn"].Cidr, 10407)
	assert.Equal(t, "US", m["us"].CountryCode)
	assert.Len(t, m["us"].Cidr, 193171)
	assert.Equal(t, "PRIVATE", m["private"].CountryCode)
	assert.Len(t, m["private"].Cidr, 18)
	assert.Contains(t, m["private"].Cidr, &CIDR{
		Ip:     []byte("\xc0\xa8\x00\x00"),
		Prefix: 16,
	})
}
// TestLoadGeoSite verifies LoadGeoSite against a pinned copy of
// geosite.dat, asserting exact entry counts and a few known domains.
func TestLoadGeoSite(t *testing.T) {
	m, err := LoadGeoSite("geosite.dat")
	// Bail out early on load failure: the checks below dereference
	// entries of m and would panic instead of failing cleanly.
	if !assert.NoError(t, err) {
		return
	}
	// Exact checks since we know the data.
	assert.Len(t, m, 1204)
	// assert.Equal takes (t, expected, actual); keeping that order
	// makes failure messages label the values correctly.
	assert.Equal(t, "NETFLIX", m["netflix"].CountryCode)
	assert.Len(t, m["netflix"].Domain, 25)
	assert.Contains(t, m["netflix"].Domain, &Domain{
		Type:  Domain_Full,
		Value: "netflix.com.edgesuite.net",
	})
	assert.Contains(t, m["netflix"].Domain, &Domain{
		Type:  Domain_RootDomain,
		Value: "fast.com",
	})
	assert.Len(t, m["google"].Domain, 1066)
	assert.Contains(t, m["google"].Domain, &Domain{
		Type:  Domain_RootDomain,
		Value: "ggpht.cn",
		Attribute: []*Domain_Attribute{
			{
				Key:        "cn",
				TypedValue: &Domain_Attribute_BoolValue{BoolValue: true},
			},
		},
	})
}

View File

@@ -23,6 +23,7 @@ import (
type ExprRule struct { type ExprRule struct {
Name string `yaml:"name"` Name string `yaml:"name"`
Action string `yaml:"action"` Action string `yaml:"action"`
Log bool `yaml:"log"`
Modifier ModifierEntry `yaml:"modifier"` Modifier ModifierEntry `yaml:"modifier"`
Expr string `yaml:"expr"` Expr string `yaml:"expr"`
} }
@@ -45,7 +46,8 @@ func ExprRulesFromYAML(file string) ([]ExprRule, error) {
// compiledExprRule is the internal, compiled representation of an expression rule. // compiledExprRule is the internal, compiled representation of an expression rule.
type compiledExprRule struct { type compiledExprRule struct {
Name string Name string
Action Action Action *Action // fallthrough if nil
Log bool
ModInstance modifier.Instance ModInstance modifier.Instance
Program *vm.Program Program *vm.Program
} }
@@ -55,6 +57,7 @@ var _ Ruleset = (*exprRuleset)(nil)
type exprRuleset struct { type exprRuleset struct {
Rules []compiledExprRule Rules []compiledExprRule
Ans []analyzer.Analyzer Ans []analyzer.Analyzer
Logger Logger
GeoMatcher *geo.GeoMatcher GeoMatcher *geo.GeoMatcher
} }
@@ -62,25 +65,31 @@ func (r *exprRuleset) Analyzers(info StreamInfo) []analyzer.Analyzer {
return r.Ans return r.Ans
} }
func (r *exprRuleset) Match(info StreamInfo) (MatchResult, error) { func (r *exprRuleset) Match(info StreamInfo) MatchResult {
env := streamInfoToExprEnv(info) env := streamInfoToExprEnv(info)
for _, rule := range r.Rules { for _, rule := range r.Rules {
v, err := vm.Run(rule.Program, env) v, err := vm.Run(rule.Program, env)
if err != nil { if err != nil {
return MatchResult{ // Log the error and continue to the next rule.
Action: ActionMaybe, r.Logger.MatchError(info, rule.Name, err)
}, fmt.Errorf("rule %q failed to run: %w", rule.Name, err) continue
} }
if vBool, ok := v.(bool); ok && vBool { if vBool, ok := v.(bool); ok && vBool {
return MatchResult{ if rule.Log {
Action: rule.Action, r.Logger.Log(info, rule.Name)
ModInstance: rule.ModInstance, }
}, nil if rule.Action != nil {
return MatchResult{
Action: *rule.Action,
ModInstance: rule.ModInstance,
}
}
} }
} }
// No match
return MatchResult{ return MatchResult{
Action: ActionMaybe, Action: ActionMaybe,
}, nil }
} }
// CompileExprRules compiles a list of expression rules into a ruleset. // CompileExprRules compiles a list of expression rules into a ruleset.
@@ -97,11 +106,18 @@ func CompileExprRules(rules []ExprRule, ans []analyzer.Analyzer, mods []modifier
} }
// Compile all rules and build a map of analyzers that are used by the rules. // Compile all rules and build a map of analyzers that are used by the rules.
for _, rule := range rules { for _, rule := range rules {
action, ok := actionStringToAction(rule.Action) if rule.Action == "" && !rule.Log {
if !ok { return nil, fmt.Errorf("rule %q must have at least one of action or log", rule.Name)
return nil, fmt.Errorf("rule %q has invalid action %q", rule.Name, rule.Action)
} }
visitor := &idVisitor{Identifiers: make(map[string]bool)} var action *Action
if rule.Action != "" {
a, ok := actionStringToAction(rule.Action)
if !ok {
return nil, fmt.Errorf("rule %q has invalid action %q", rule.Name, rule.Action)
}
action = &a
}
visitor := &idVisitor{Variables: make(map[string]bool), Identifiers: make(map[string]bool)}
patcher := &idPatcher{} patcher := &idPatcher{}
program, err := expr.Compile(rule.Expr, program, err := expr.Compile(rule.Expr,
func(c *conf.Config) { func(c *conf.Config) {
@@ -118,7 +134,8 @@ func CompileExprRules(rules []ExprRule, ans []analyzer.Analyzer, mods []modifier
return nil, fmt.Errorf("rule %q failed to patch expression: %w", rule.Name, patcher.Err) return nil, fmt.Errorf("rule %q failed to patch expression: %w", rule.Name, patcher.Err)
} }
for name := range visitor.Identifiers { for name := range visitor.Identifiers {
if isBuiltInAnalyzer(name) { // Skip built-in analyzers & user-defined variables
if isBuiltInAnalyzer(name) || visitor.Variables[name] {
continue continue
} }
// Check if it's one of the built-in functions, and if so, // Check if it's one of the built-in functions, and if so,
@@ -145,9 +162,10 @@ func CompileExprRules(rules []ExprRule, ans []analyzer.Analyzer, mods []modifier
cr := compiledExprRule{ cr := compiledExprRule{
Name: rule.Name, Name: rule.Name,
Action: action, Action: action,
Log: rule.Log,
Program: program, Program: program,
} }
if action == ActionModify { if action != nil && *action == ActionModify {
mod, ok := fullModMap[rule.Modifier.Name] mod, ok := fullModMap[rule.Modifier.Name]
if !ok { if !ok {
return nil, fmt.Errorf("rule %q uses unknown modifier %q", rule.Name, rule.Modifier.Name) return nil, fmt.Errorf("rule %q uses unknown modifier %q", rule.Name, rule.Modifier.Name)
@@ -168,6 +186,7 @@ func CompileExprRules(rules []ExprRule, ans []analyzer.Analyzer, mods []modifier
return &exprRuleset{ return &exprRuleset{
Rules: compiledRules, Rules: compiledRules,
Ans: depAns, Ans: depAns,
Logger: config.Logger,
GeoMatcher: geoMatcher, GeoMatcher: geoMatcher,
}, nil }, nil
} }
@@ -265,11 +284,14 @@ func modifiersToMap(mods []modifier.Modifier) map[string]modifier.Modifier {
// idVisitor is a visitor that collects all identifiers in an expression. // idVisitor is a visitor that collects all identifiers in an expression.
// This is for determining which analyzers are used by the expression. // This is for determining which analyzers are used by the expression.
type idVisitor struct { type idVisitor struct {
Variables map[string]bool
Identifiers map[string]bool Identifiers map[string]bool
} }
func (v *idVisitor) Visit(node *ast.Node) { func (v *idVisitor) Visit(node *ast.Node) {
if idNode, ok := (*node).(*ast.IdentifierNode); ok { if varNode, ok := (*node).(*ast.VariableDeclaratorNode); ok {
v.Variables[varNode.Name] = true
} else if idNode, ok := (*node).(*ast.IdentifierNode); ok {
v.Identifiers[idNode.Value] = true v.Identifiers[idNode.Value] = true
} }
} }
@@ -284,6 +306,10 @@ func (p *idPatcher) Visit(node *ast.Node) {
switch (*node).(type) { switch (*node).(type) {
case *ast.CallNode: case *ast.CallNode:
callNode := (*node).(*ast.CallNode) callNode := (*node).(*ast.CallNode)
if callNode.Func == nil {
// Ignore invalid call nodes
return
}
switch callNode.Func.Name { switch callNode.Func.Name {
case "cidr": case "cidr":
cidrStringNode, ok := callNode.Arguments[1].(*ast.StringNode) cidrStringNode, ok := callNode.Arguments[1].(*ast.StringNode)

View File

@@ -90,10 +90,17 @@ type Ruleset interface {
Analyzers(StreamInfo) []analyzer.Analyzer Analyzers(StreamInfo) []analyzer.Analyzer
// Match matches a stream against the ruleset and returns the result. // Match matches a stream against the ruleset and returns the result.
// It must be safe for concurrent use by multiple workers. // It must be safe for concurrent use by multiple workers.
Match(StreamInfo) (MatchResult, error) Match(StreamInfo) MatchResult
}
// Logger is the logging interface for the ruleset.
type Logger interface {
Log(info StreamInfo, name string)
MatchError(info StreamInfo, name string, err error)
} }
type BuiltinConfig struct { type BuiltinConfig struct {
Logger Logger
GeoSiteFilename string GeoSiteFilename string
GeoIpFilename string GeoIpFilename string
} }