Compare commits

...

11 Commits

Author SHA1 Message Date
klzgrad
49153d97ef Add continuous integration and tests 2022-05-22 01:46:04 +08:00
klzgrad
406dbcc176 Add build scripts 2022-05-22 01:46:04 +08:00
klzgrad
e54adb3445 Add example config.json 2022-05-22 01:46:04 +08:00
klzgrad
85c6cc1a3c Add README 2022-05-22 01:46:04 +08:00
klzgrad
26f7e7bf3e Add LICENSE 2022-05-22 01:46:04 +08:00
klzgrad
083699b5b1 Add source import tool 2022-05-22 01:46:04 +08:00
klzgrad
cf8c61a5ff Add Cronet CGO SDK builder
bidi_example.cc does not work in cross compilation yet, because libstdc++-dev
is not installed in the cross-compile sysroots.
2022-05-22 01:46:03 +08:00
klzgrad
c17764fb27 Add initial implementation of Naive client 2022-05-22 01:43:32 +08:00
klzgrad
2a0e13ae53 net, grpc_support: Set NetworkIsolationKey from header
If a BidirectionalStream request contains a -network-isolation-key
header, it is used to set the network isolation key of the stream.
The header itself is removed and not transmitted.

The header value should be a valid URL with different host and port
for each different network isolation key. An invalid header value is
reported by returning an error from bidirectional_stream_start.

Network isolation takes effect only if it is enabled by the
following experimental option:

  "feature_list": {
    "enable-features": "PartitionConnectionsByNetworkIsolationKey"
  }
2022-05-22 01:43:32 +08:00
klzgrad
b39a709937 cronet: Support setting feature list from experimental option 2022-05-22 01:43:32 +08:00
klzgrad
0ddeea513d cronet: Use fixed proxy resolution from experimental option proxy_server 2022-05-22 01:43:32 +08:00
50 changed files with 6357 additions and 24 deletions

622
.github/workflows/build.yml vendored Normal file

@ -0,0 +1,622 @@
name: Build
on:
push:
branches: [master]
paths-ignore: [README.md]
release:
types: [published]
defaults:
run:
shell: bash
working-directory: src
env:
CACHE_EPOCH: 1
CCACHE_MAXSIZE: 200M
CCACHE_MAXFILES: 0
SCCACHE_CACHE_SIZE: 200M
jobs:
cache-toolchains-posix:
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v2
- name: Cache toolchains (Linux, OpenWrt, Android)
uses: actions/cache@v2
with:
path: |
src/third_party/llvm-build/Release+Asserts/
src/gn/
src/qemu-user-static*.deb
key: toolchains-posix-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
- name: Cache PGO (Linux, OpenWrt)
uses: actions/cache@v2
with:
path: src/chrome/build/pgo_profiles/
key: pgo-linux-openwrt-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
- name: Cache AFDO (Android)
uses: actions/cache@v2
with:
path: src/chrome/android/profiles/
key: afdo-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
- name: Cache Android NDK (Android)
uses: actions/cache@v2
with:
path: src/third_party/android_ndk/
key: android-ndk-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
- run: ./get-clang.sh
- run: EXTRA_FLAGS='target_os="android"' ./get-clang.sh
- run: |
if [ ! -f qemu-user-static*.deb ]; then
wget https://snapshot.debian.org/archive/debian/20220515T152741Z/pool/main/q/qemu/qemu-user-static_7.0%2Bdfsg-6_amd64.deb
fi
cache-toolchains-win:
runs-on: windows-2019
steps:
- uses: actions/checkout@v2
- name: Cache toolchains
uses: actions/cache@v2
with:
path: |
src/third_party/llvm-build/Release+Asserts/
src/gn/
~/.cargo/bin/
~/bin/ninja.exe
key: toolchains-win-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
- name: Cache PGO (win64)
uses: actions/cache@v2
with:
path: src/chrome/build/pgo_profiles/chrome-win64-*
key: pgo-win64-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
- name: Cache PGO (win32)
uses: actions/cache@v2
with:
path: src/chrome/build/pgo_profiles/chrome-win32-*
key: pgo-win32-arm64-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
- run: EXTRA_FLAGS='target_cpu="x64"' ./get-clang.sh
- run: EXTRA_FLAGS='target_cpu="x86"' ./get-clang.sh
- run: |
if [ ! -f ~/bin/ninja.exe ]; then
curl -LO https://github.com/ninja-build/ninja/releases/download/v1.10.2/ninja-win.zip
unzip ninja-win.zip -d ~/bin
fi
cache-toolchains-mac:
runs-on: macos-11
steps:
- uses: actions/checkout@v2
- uses: actions/cache@v2
with:
path: |
src/third_party/llvm-build/Release+Asserts/
src/chrome/build/pgo_profiles/chrome-mac-*
src/gn/
key: toolchains-pgo-mac-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
- run: EXTRA_FLAGS='target_cpu="x64"' ./get-clang.sh
- run: EXTRA_FLAGS='target_cpu="arm64"' ./get-clang.sh
linux:
needs: cache-toolchains-posix
runs-on: ubuntu-20.04
strategy:
fail-fast: false
matrix:
arch: [x64, x86, arm64, arm, mipsel, mips64el]
env:
EXTRA_FLAGS: 'target_cpu="${{ matrix.arch }}"'
BUNDLE: naiveproxy-${{ github.event.release.tag_name }}-${{ github.job }}-${{ matrix.arch }}
CRONET_BUNDLE: cronet-${{ github.event.release.tag_name }}-${{ github.job }}-${{ matrix.arch }}
steps:
- uses: actions/checkout@v2
- uses: actions/setup-go@v3
with:
go-version: '^1.18.1'
- name: Cache toolchains (Linux, OpenWrt, Android)
uses: actions/cache@v2
with:
path: |
src/third_party/llvm-build/Release+Asserts/
src/gn/
src/qemu-user-static*.deb
key: toolchains-posix-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
- name: Cache PGO (Linux, OpenWrt)
uses: actions/cache@v2
with:
path: src/chrome/build/pgo_profiles/
key: pgo-linux-openwrt-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
- name: Cache sysroot
uses: actions/cache@v2
with:
path: src/out/sysroot-build/sid/sid_*
key: sysroot-linux-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
- id: ccache-timestamp
run: echo "::set-output name=date::$(date +%s)"
- name: Cache ccache files
uses: actions/cache@v2
with:
path: ~/.ccache
key: ccache-linux-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-${{ steps.ccache-timestamp.outputs.date }}
restore-keys: ccache-linux-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-
- name: Install APT packages
run: |
sudo apt update
sudo apt install ninja-build pkg-config qemu-user ccache bubblewrap
sudo apt remove -y qemu-user-binfmt
sudo dpkg -i qemu-user-static_7.0+dfsg-6_amd64.deb
# libc6-i386 interferes with x86 build
sudo apt remove libc6-i386
- run: ./get-clang.sh
- run: ccache -z
- run: ./build.sh
- run: ccache -s
- run: CCACHE_DISABLE=1 ./go-build.sh
working-directory: src/out/Release/cronet
- run: ../tests/basic.sh out/Release/naive
- name: Pack naiveproxy assets
run: |
mkdir ${{ env.BUNDLE }}
cp out/Release/naive config.json ../LICENSE ../USAGE.txt ${{ env.BUNDLE }}
tar cJf ${{ env.BUNDLE }}.tar.xz ${{ env.BUNDLE }}
openssl sha256 out/Release/naive >sha256sum.txt
echo "SHA256SUM=$(cut -d' ' -f2 sha256sum.txt)" >>$GITHUB_ENV
- uses: actions/upload-artifact@v2
with:
name: ${{ env.BUNDLE }}.tar.xz naive executable sha256 ${{ env.SHA256SUM }}
path: src/sha256sum.txt
- name: Upload naiveproxy assets
if: ${{ github.event_name == 'release' }}
run: hub release edit -a ${{ env.BUNDLE }}.tar.xz -m "" "${GITHUB_REF##*/}"
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Pack cronet assets
if: ${{ github.event_name == 'release' }}
run: |
mv out/Release/cronet ${{ env.CRONET_BUNDLE }}
tar cJf ${{ env.CRONET_BUNDLE }}.tar.xz ${{ env.CRONET_BUNDLE }}
openssl sha256 ${{ env.CRONET_BUNDLE }}.tar.xz >sha256sum.txt
echo "SHA256SUM=$(cut -d' ' -f2 sha256sum.txt)" >>$GITHUB_ENV
- uses: actions/upload-artifact@v2
if: ${{ github.event_name == 'release' }}
with:
name: ${{ env.CRONET_BUNDLE }}.tar.xz sha256 ${{ env.SHA256SUM }}
path: src/sha256sum.txt
- name: Upload cronet assets
if: ${{ github.event_name == 'release' }}
run: hub release edit -a ${{ env.CRONET_BUNDLE }}.tar.xz -m "" "${GITHUB_REF##*/}"
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
android:
needs: cache-toolchains-posix
runs-on: ubuntu-20.04
strategy:
fail-fast: false
matrix:
arch: [x64, x86, arm64, arm]
env:
EXTRA_FLAGS: 'target_cpu="${{ matrix.arch }}" target_os="android"'
BUNDLE: naiveproxy-${{ github.event.release.tag_name }}-${{ github.job }}-${{ matrix.arch }}
CRONET_BUNDLE: cronet-${{ github.event.release.tag_name }}-${{ github.job }}-${{ matrix.arch }}
steps:
- uses: actions/checkout@v2
- uses: actions/setup-go@v3
with:
go-version: '^1.18.1'
- name: Cache toolchains (Linux, OpenWrt, Android)
uses: actions/cache@v2
with:
path: |
src/third_party/llvm-build/Release+Asserts/
src/gn/
src/qemu-user-static*.deb
key: toolchains-posix-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
- name: Cache AFDO (Android)
uses: actions/cache@v2
with:
path: src/chrome/android/profiles/
key: afdo-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
- name: Cache Android NDK (Android)
uses: actions/cache@v2
with:
path: src/third_party/android_ndk/
key: android-ndk-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
- name: Cache sysroot
uses: actions/cache@v2
with:
path: src/out/sysroot-build/android/
key: sysroot-android-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
- id: ccache-timestamp
run: echo "::set-output name=date::$(date +%s)"
- name: Cache ccache files
uses: actions/cache@v2
with:
path: ~/.ccache
key: ccache-android-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-${{ steps.ccache-timestamp.outputs.date }}
restore-keys: ccache-android-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-
- name: Install APT packages
run: |
sudo apt update
sudo apt install ninja-build pkg-config qemu-user ccache bubblewrap
sudo apt remove -y qemu-user-binfmt
sudo dpkg -i qemu-user-static_7.0+dfsg-6_amd64.deb
# libc6-i386 interferes with x86 build
sudo apt remove libc6-i386
- run: ./get-clang.sh
- run: ccache -z
- run: ./build.sh
- run: ccache -s
- run: ./get-android-sys.sh
- run: CCACHE_DISABLE=1 ./go-build.sh
working-directory: src/out/Release/cronet
- run: ../tests/basic.sh out/Release/naive
- name: Pack naiveproxy assets
run: |
mkdir ${{ env.BUNDLE }}
cp out/Release/naive config.json ../LICENSE ../USAGE.txt ${{ env.BUNDLE }}
tar cJf ${{ env.BUNDLE }}.tar.xz ${{ env.BUNDLE }}
openssl sha256 out/Release/naive >sha256sum.txt
echo "SHA256SUM=$(cut -d' ' -f2 sha256sum.txt)" >>$GITHUB_ENV
- uses: actions/upload-artifact@v2
with:
name: ${{ env.BUNDLE }}.tar.xz naive executable sha256 ${{ env.SHA256SUM }}
path: src/sha256sum.txt
- name: Upload naiveproxy assets
if: ${{ github.event_name == 'release' }}
run: hub release edit -a ${{ env.BUNDLE }}.tar.xz -m "" "${GITHUB_REF##*/}"
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Pack cronet assets
if: ${{ github.event_name == 'release' }}
run: |
mv out/Release/cronet ${{ env.CRONET_BUNDLE }}
tar cJf ${{ env.CRONET_BUNDLE }}.tar.xz ${{ env.CRONET_BUNDLE }}
openssl sha256 ${{ env.CRONET_BUNDLE }}.tar.xz >sha256sum.txt
echo "SHA256SUM=$(cut -d' ' -f2 sha256sum.txt)" >>$GITHUB_ENV
- uses: actions/upload-artifact@v2
if: ${{ github.event_name == 'release' }}
with:
name: ${{ env.CRONET_BUNDLE }}.tar.xz sha256 ${{ env.SHA256SUM }}
path: src/sha256sum.txt
- name: Upload cronet assets
if: ${{ github.event_name == 'release' }}
run: hub release edit -a ${{ env.CRONET_BUNDLE }}.tar.xz -m "" "${GITHUB_REF##*/}"
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
win:
needs: cache-toolchains-win
runs-on: windows-2019
strategy:
fail-fast: false
matrix:
arch: [x64, x86, arm64]
env:
EXTRA_FLAGS: 'target_cpu="${{ matrix.arch }}"'
BUNDLE: naiveproxy-${{ github.event.release.tag_name }}-${{ github.job }}-${{ matrix.arch }}
CRONET_BUNDLE: cronet-${{ github.event.release.tag_name }}-${{ github.job }}-${{ matrix.arch }}
steps:
- uses: actions/checkout@v2
- uses: actions/setup-go@v3
with:
go-version: '^1.18.1'
- name: Cache toolchains
uses: actions/cache@v2
with:
path: |
src/third_party/llvm-build/Release+Asserts/
src/gn/
~/.cargo/bin/
~/bin/ninja.exe
key: toolchains-win-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
- name: Cache PGO (win64)
if: ${{ matrix.arch == 'x64' }}
uses: actions/cache@v2
with:
path: src/chrome/build/pgo_profiles/chrome-win64-*
key: pgo-win64-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
- name: Cache PGO (win32)
if: ${{ matrix.arch != 'x64' }}
uses: actions/cache@v2
with:
path: src/chrome/build/pgo_profiles/chrome-win32-*
key: pgo-win32-arm64-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
- id: ccache-timestamp
run: echo "::set-output name=date::$(date +%s)"
- name: Cache ccache files
uses: actions/cache@v2
with:
path: ~/AppData/Local/Mozilla/sccache
key: ccache-win-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-${{ steps.ccache-timestamp.outputs.date }}
restore-keys: ccache-win-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-
- run: ./get-clang.sh
- run: ~/.cargo/bin/sccache -z
- run: ./build.sh
- run: ~/.cargo/bin/sccache -s
- run: CCACHE_DISABLE=1 ./go-build.sh
working-directory: src/out/Release/cronet
- run: ../tests/basic.sh out/Release/naive
# No real or emulated environment is available to test this.
if: ${{ matrix.arch != 'arm64' }}
- name: Pack naiveproxy assets
run: |
mkdir ${{ env.BUNDLE }}
cp out/Release/naive config.json ../LICENSE ../USAGE.txt ${{ env.BUNDLE }}
7z a ${{ env.BUNDLE }}.zip ${{ env.BUNDLE }}
openssl sha256 out/Release/naive.exe >sha256sum.txt
echo "SHA256SUM=$(cut -d' ' -f2 sha256sum.txt)" >>$GITHUB_ENV
- uses: actions/upload-artifact@v2
with:
name: ${{ env.BUNDLE }}.zip naive executable sha256 ${{ env.SHA256SUM }}
path: src/sha256sum.txt
- name: Upload naiveproxy assets
if: ${{ github.event_name == 'release' }}
run: hub release edit -a ${{ env.BUNDLE }}.zip -m "" "${GITHUB_REF##*/}"
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Pack cronet assets
if: ${{ github.event_name == 'release' }}
run: |
mv out/Release/cronet ${{ env.CRONET_BUNDLE }}
7z a ${{ env.CRONET_BUNDLE }}.zip ${{ env.CRONET_BUNDLE }}
openssl sha256 ${{ env.CRONET_BUNDLE }}.zip >sha256sum.txt
echo "SHA256SUM=$(cut -d' ' -f2 sha256sum.txt)" >>$GITHUB_ENV
- uses: actions/upload-artifact@v2
if: ${{ github.event_name == 'release' }}
with:
name: ${{ env.CRONET_BUNDLE }}.zip sha256 ${{ env.SHA256SUM }}
path: src/sha256sum.txt
- name: Upload cronet assets
if: ${{ github.event_name == 'release' }}
run: hub release edit -a ${{ env.CRONET_BUNDLE }}.zip -m "" "${GITHUB_REF##*/}"
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
mac:
needs: cache-toolchains-mac
runs-on: macos-11
strategy:
fail-fast: false
matrix:
arch: [x64, arm64]
env:
EXTRA_FLAGS: 'target_cpu="${{ matrix.arch }}"'
BUNDLE: naiveproxy-${{ github.event.release.tag_name }}-${{ github.job }}-${{ matrix.arch }}
CRONET_BUNDLE: cronet-${{ github.event.release.tag_name }}-${{ github.job }}-${{ matrix.arch }}
steps:
- uses: actions/checkout@v2
- uses: actions/setup-go@v3
with:
go-version: '^1.18.1'
- name: Cache toolchains and PGO
uses: actions/cache@v2
with:
path: |
src/third_party/llvm-build/Release+Asserts/
src/chrome/build/pgo_profiles/chrome-mac-*
src/gn/
key: toolchains-pgo-mac-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
- id: ccache-timestamp
run: echo "::set-output name=date::$(date +%s)"
- name: Cache ccache files
uses: actions/cache@v2
with:
path: ~/Library/Caches/ccache
key: ccache-mac-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-${{ steps.ccache-timestamp.outputs.date }}
restore-keys: ccache-mac-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-
- run: brew install ninja ccache
- run: ./get-clang.sh
- run: ccache -z
- run: ./build.sh
- run: ccache -s
- run: CCACHE_DISABLE=1 ./go-build.sh
working-directory: src/out/Release/cronet
- run: ../tests/basic.sh out/Release/naive
# No real or emulated environment is available to test this.
if: ${{ matrix.arch != 'arm64' }}
- name: Pack naiveproxy assets
run: |
mkdir ${{ env.BUNDLE }}
cp out/Release/naive config.json ../LICENSE ../USAGE.txt ${{ env.BUNDLE }}
tar cJf ${{ env.BUNDLE }}.tar.xz ${{ env.BUNDLE }}
openssl sha256 out/Release/naive >sha256sum.txt
echo "SHA256SUM=$(cut -d' ' -f2 sha256sum.txt)" >>$GITHUB_ENV
- uses: actions/upload-artifact@v2
with:
name: ${{ env.BUNDLE }}.tar.xz naive executable sha256 ${{ env.SHA256SUM }}
path: src/sha256sum.txt
- name: Upload naiveproxy assets
if: ${{ github.event_name == 'release' }}
run: hub release edit -a ${{ env.BUNDLE }}.tar.xz -m "" "${GITHUB_REF##*/}"
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Pack cronet assets
if: ${{ github.event_name == 'release' }}
run: |
mv out/Release/cronet ${{ env.CRONET_BUNDLE }}
tar cJf ${{ env.CRONET_BUNDLE }}.tar.xz ${{ env.CRONET_BUNDLE }}
openssl sha256 ${{ env.CRONET_BUNDLE }}.tar.xz >sha256sum.txt
echo "SHA256SUM=$(cut -d' ' -f2 sha256sum.txt)" >>$GITHUB_ENV
- uses: actions/upload-artifact@v2
if: ${{ github.event_name == 'release' }}
with:
name: ${{ env.CRONET_BUNDLE }}.tar.xz sha256 ${{ env.SHA256SUM }}
path: src/sha256sum.txt
- name: Upload cronet assets
if: ${{ github.event_name == 'release' }}
run: hub release edit -a ${{ env.CRONET_BUNDLE }}.tar.xz -m "" "${GITHUB_REF##*/}"
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
openwrt:
needs: cache-toolchains-posix
runs-on: ubuntu-20.04
strategy:
fail-fast: false
matrix:
include:
- arch: x86_64
openwrt: 'target=x86 subtarget=64'
target_cpu: x64
- arch: x86
openwrt: 'target=x86 subtarget=generic'
target_cpu: x86
- arch: aarch64_cortex-a53
openwrt: 'target=sunxi subtarget=cortexa53'
target_cpu: arm64
extra: 'arm_cpu="cortex-a53"'
- arch: aarch64_cortex-a53-static
openwrt: 'target=sunxi subtarget=cortexa53'
target_cpu: arm64
extra: 'arm_cpu="cortex-a53" build_static=true'
- arch: aarch64_cortex-a72
openwrt: 'target=mvebu subtarget=cortexa72'
target_cpu: arm64
extra: 'arm_cpu="cortex-a72"'
- arch: aarch64_generic
openwrt: 'target=rockchip subtarget=armv8'
target_cpu: arm64
- arch: arm_arm1176jzf-s_vfp
openwrt: 'target=bcm27xx subtarget=bcm2708'
target_cpu: arm
extra: 'arm_version=0 arm_cpu="arm1176jzf-s" arm_fpu="vfp" arm_float_abi="hard" arm_use_neon=false arm_use_thumb=false'
- arch: arm_arm926ej-s
openwrt: 'target=mxs'
target_cpu: arm
extra: 'arm_version=0 arm_cpu="arm926ej-s" arm_float_abi="soft" arm_use_neon=false arm_use_thumb=false'
- arch: arm_cortex-a15_neon-vfpv4
openwrt: 'target=armvirt subtarget=32'
target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a15" arm_fpu="neon-vfpv4" arm_float_abi="hard" arm_use_neon=true'
- arch: arm_cortex-a5_vfpv4
openwrt: 'target=at91 subtarget=sama5'
target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a5" arm_fpu="vfpv4" arm_float_abi="hard" arm_use_neon=false'
- arch: arm_cortex-a7
openwrt: 'target=mediatek subtarget=mt7629'
target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a7" arm_float_abi="soft" arm_use_neon=false'
- arch: arm_cortex-a7_neon-vfpv4
openwrt: 'target=sunxi subtarget=cortexa7'
target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a7" arm_fpu="neon-vfpv4" arm_float_abi="hard" arm_use_neon=true'
- arch: arm_cortex-a7_neon-vfpv4-static
openwrt: 'target=sunxi subtarget=cortexa7'
target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a7" arm_fpu="neon-vfpv4" arm_float_abi="hard" arm_use_neon=true build_static=true'
- arch: arm_cortex-a8_vfpv3
openwrt: 'target=sunxi subtarget=cortexa8'
target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a8" arm_fpu="vfpv3" arm_float_abi="hard" arm_use_neon=false'
- arch: arm_cortex-a9
openwrt: 'target=bcm53xx subtarget=generic'
target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a9" arm_float_abi="soft" arm_use_neon=false'
- arch: arm_cortex-a9-static
openwrt: 'target=bcm53xx subtarget=generic'
target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a9" arm_float_abi="soft" arm_use_neon=false build_static=true'
- arch: arm_cortex-a9_neon
openwrt: 'target=imx6'
target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a9" arm_fpu="neon" arm_float_abi="hard" arm_use_neon=true'
- arch: arm_cortex-a9_vfpv3-d16
openwrt: 'target=tegra'
target_cpu: arm
extra: 'arm_version=0 arm_cpu="cortex-a9" arm_fpu="vfpv3-d16" arm_float_abi="hard" arm_use_neon=false'
- arch: arm_mpcore
openwrt: 'target=oxnas subtarget=ox820'
target_cpu: arm
extra: 'arm_version=0 arm_cpu="mpcore" arm_float_abi="soft" arm_use_neon=false arm_use_thumb=false'
- arch: arm_xscale
openwrt: 'target=kirkwood'
target_cpu: arm
extra: 'arm_version=0 arm_cpu="xscale" arm_float_abi="soft" arm_use_neon=false arm_use_thumb=false'
- arch: mipsel_24kc
openwrt: 'target=ramips subtarget=rt305x'
target_cpu: mipsel
extra: 'mips_arch_variant="r2" mips_float_abi="soft" mips_tune="24kc"'
- arch: mipsel_74kc
openwrt: 'target=ramips subtarget=rt3883'
target_cpu: mipsel
extra: 'mips_arch_variant="r2" mips_float_abi="soft" mips_tune="74kc"'
- arch: mipsel_mips32
openwrt: 'target=bcm47xx subtarget=generic'
target_cpu: mipsel
extra: 'mips_arch_variant="r1" mips_float_abi="soft"'
env:
EXTRA_FLAGS: target_cpu="${{ matrix.target_cpu }}" target_os="openwrt" use_allocator="none" use_allocator_shim=false use_partition_alloc=false ${{ matrix.extra }}
OPENWRT_FLAGS: arch=${{ matrix.arch }} release=21.02.2 gcc_ver=8.4.0 ${{ matrix.openwrt }}
BUNDLE: naiveproxy-${{ github.event.release.tag_name }}-${{ github.job }}-${{ matrix.arch }}
CRONET_BUNDLE: cronet-${{ github.event.release.tag_name }}-${{ github.job }}-${{ matrix.arch }}
steps:
- uses: actions/checkout@v2
- uses: actions/setup-go@v3
with:
go-version: '^1.18.1'
- name: Cache toolchains (Linux, OpenWrt, Android)
uses: actions/cache@v2
with:
path: |
src/third_party/llvm-build/Release+Asserts/
src/gn/
src/qemu-user-static*.deb
key: toolchains-posix-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
- name: Cache PGO (Linux, OpenWrt)
uses: actions/cache@v2
with:
path: src/chrome/build/pgo_profiles/
key: pgo-linux-openwrt-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
- name: Cache sysroot
uses: actions/cache@v2
with:
path: src/out/sysroot-build/openwrt
key: sysroot-openwrt-21.02.2-${{ matrix.arch }}-v${{ env.CACHE_EPOCH }}
- id: ccache-timestamp
run: echo "::set-output name=date::$(date +%s)"
- name: Cache ccache files
uses: actions/cache@v2
with:
path: ~/.ccache
key: ccache-openwrt-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-${{ steps.ccache-timestamp.outputs.date }}
restore-keys: ccache-openwrt-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-
- name: Install APT packages
run: |
sudo apt update
sudo apt install ninja-build pkg-config qemu-user ccache bubblewrap
sudo apt remove -y qemu-user-binfmt
sudo dpkg -i qemu-user-static_7.0+dfsg-6_amd64.deb
# libc6-i386 interferes with x86 build
sudo apt remove libc6-i386
- run: ./get-clang.sh
- run: ccache -z
- run: ./build.sh
- run: ccache -s
- run: CCACHE_DISABLE=1 ./go-build.sh
working-directory: src/out/Release/cronet
if: ${{ ! contains(matrix.extra, 'build_static=true') }}
- run: ../tests/basic.sh out/Release/naive
- name: Pack naiveproxy assets
run: |
mkdir ${{ env.BUNDLE }}
cp out/Release/naive config.json ../LICENSE ../USAGE.txt ${{ env.BUNDLE }}
tar cJf ${{ env.BUNDLE }}.tar.xz ${{ env.BUNDLE }}
openssl sha256 out/Release/naive >sha256sum.txt
echo "SHA256SUM=$(cut -d' ' -f2 sha256sum.txt)" >>$GITHUB_ENV
- uses: actions/upload-artifact@v2
with:
name: ${{ env.BUNDLE }}.tar.xz naive executable sha256 ${{ env.SHA256SUM }}
path: src/sha256sum.txt
- name: Upload naiveproxy assets
if: ${{ github.event_name == 'release' }}
run: hub release edit -a ${{ env.BUNDLE }}.tar.xz -m "" "${GITHUB_REF##*/}"
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Pack cronet assets
if: ${{ github.event_name == 'release' && ! contains(matrix.extra, 'build_static=true') }}
run: |
mv out/Release/cronet ${{ env.CRONET_BUNDLE }}
tar cJf ${{ env.CRONET_BUNDLE }}.tar.xz ${{ env.CRONET_BUNDLE }}
openssl sha256 ${{ env.CRONET_BUNDLE }}.tar.xz >sha256sum.txt
echo "SHA256SUM=$(cut -d' ' -f2 sha256sum.txt)" >>$GITHUB_ENV
- uses: actions/upload-artifact@v2
if: ${{ github.event_name == 'release' && ! contains(matrix.extra, 'build_static=true') }}
with:
name: ${{ env.CRONET_BUNDLE }}.tar.xz sha256 ${{ env.SHA256SUM }}
path: src/sha256sum.txt
- name: Upload cronet assets
if: ${{ github.event_name == 'release' && ! contains(matrix.extra, 'build_static=true') }}
run: hub release edit -a ${{ env.CRONET_BUNDLE }}.tar.xz -m "" "${GITHUB_REF##*/}"
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

1
CHROMIUM_VERSION Normal file

@ -0,0 +1 @@
101.0.4951.41

27
LICENSE Normal file

@ -0,0 +1,27 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

147
README.md Normal file

@ -0,0 +1,147 @@
# NaïveProxy ![build workflow](https://github.com/klzgrad/naiveproxy/actions/workflows/build.yml/badge.svg)
NaïveProxy uses Chrome's network stack to camouflage traffic with strong censorship resistance and low detectability. Reusing Chrome's stack also ensures best practices in performance and security.
The following traffic attacks are mitigated in NaïveProxy:
* Website fingerprinting / traffic classification: [mitigated](https://arxiv.org/abs/1707.00641) by traffic multiplexing in HTTP/2.
* [TLS parameter fingerprinting](https://arxiv.org/abs/1607.01639): defeated by reusing [Chrome's network stack](https://www.chromium.org/developers/design-documents/network-stack).
* [Active probing](https://ensa.fi/active-probing/): defeated by *application fronting*, i.e. hiding proxy servers behind a commonly used frontend server with application-layer routing.
* Length-based traffic analysis: mitigated by length padding.
## Architecture
[Browser → Naïve client] ⟶ Censor ⟶ [Frontend → Naïve server] ⟶ Internet
NaïveProxy uses Chrome's network stack to ensure its observable behavior is identical to regular HTTP/2 traffic between Chrome and standard frontend servers.
The frontend server can be any reverse proxy that is able to route HTTP/2 traffic based on HTTP authorization headers, preventing active probing of proxy existence. Known ones include Caddy with its forwardproxy plugin and HAProxy.
The Naïve server here works as a forward proxy and a packet length padding layer. Caddy forwardproxy is also a forward proxy but it lacks a padding layer. A [fork](https://github.com/klzgrad/forwardproxy) adds the NaïveProxy padding layer to forwardproxy, combining both in one.
## Download binaries
[Download here](https://github.com/klzgrad/naiveproxy/releases/latest). Supported platforms include: Windows, Android (with [SagerNet](https://github.com/SagerNet/SagerNet)), Linux, Mac OS, and OpenWrt ([support status](https://github.com/klzgrad/naiveproxy/wiki/OpenWrt-Support)).
Users should always use the latest version to keep signatures identical to Chrome.
## Server setup
The following describes the naïve fork of forwardproxy setup.
Build:
```sh
go install github.com/caddyserver/xcaddy/cmd/xcaddy@latest
~/go/bin/xcaddy build --with github.com/caddyserver/forwardproxy@caddy2=github.com/klzgrad/forwardproxy@naive
```
Example Caddyfile (replace `user` and `pass` accordingly):
```
{
servers {
protocol {
experimental_http3
}
}
}
:443, example.com
tls me@example.com
route {
forward_proxy {
basic_auth user pass
hide_ip
hide_via
probe_resistance
}
file_server { root /var/www/html }
}
```
`:443` must appear first for this Caddyfile to work. For more advanced usage consider using [JSON for Caddy 2's config](https://caddyserver.com/docs/json/).
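If you go the JSON route, Caddy can convert the Caddyfile above for you (a sketch; the output file name is just an example):
```sh
# Convert the example Caddyfile into Caddy 2's native JSON config.
./caddy adapt --config Caddyfile --pretty > caddy.json
```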
Run with the Caddyfile:
```
sudo setcap cap_net_bind_service=+ep ./caddy
./caddy start
```
See also [Systemd unit example](https://github.com/klzgrad/naiveproxy/wiki/Run-Caddy-as-a-daemon) and [HAProxy setup](https://github.com/klzgrad/naiveproxy/wiki/HAProxy-Setup).
## Client setup
Run `./naive` with the following `config.json` to get a SOCKS5 proxy at local port 1080.
```json
{
"listen": "socks://127.0.0.1:1080",
"proxy": "https://user:pass@example.com"
}
```
Or `quic://user:pass@example.com`, if it works better. See also [parameter usage](https://github.com/klzgrad/naiveproxy/blob/master/USAGE.txt) and [performance tuning](https://github.com/klzgrad/naiveproxy/wiki/Performance-Tuning).
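To verify the client is working, send a request through the local SOCKS5 port (a quick check; any HTTPS URL will do):
```sh
# Route a test request through the local NaïveProxy SOCKS5 listener.
curl -v --proxy socks5h://127.0.0.1:1080 https://example.com
```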
## Build from source
If you prefer not to download binaries, you can build NaïveProxy from source.
Prerequisites:
* Ubuntu (apt install): git, python, ninja-build (>= 1.7), pkg-config, curl, unzip, ccache (optional)
* MacOS (brew install): git, ninja, ccache (optional)
* Windows ([choco install](https://chocolatey.org/)): git, python, ninja, visualstudio2019community. See [Chromium's page](https://chromium.googlesource.com/chromium/src/+/master/docs/windows_build_instructions.md#Visual-Studio) for detail on Visual Studio requirements.
Build (output to `./out/Release/naive`):
```
git clone --depth 1 https://github.com/klzgrad/naiveproxy.git
cd naiveproxy/src
./get-clang.sh
./build.sh
```
The scripts download tools from Google servers with curl. You may need to set a proxy environment variable for curl, e.g. `export ALL_PROXY=socks5h://127.0.0.1:1080`.
## Notes for downstream
Do not use the master branch to track updates, as it rebases from a new root commit for every new Chrome release. Use stable releases and the associated tags to track new versions, where short release notes are also provided.
## FAQ
### Why not use Go, Node, etc. for TLS?
Their TLS stacks have distinct features that can be [easily detected](https://arxiv.org/abs/1607.01639). TLS parameters are generally very informative and distinguishable. Most client-originated traffic comes from browsers, putting the custom network stacks in the minority.
Previously, Tor tried to mimic Firefox's TLS signature and still got [identified and blocked by firewalls](https://groups.google.com/d/msg/traffic-obf/BpFSCVgi5rs/nCqNwoeRKQAJ), because that signature was of an outdated version of Firefox and the firewall determined the rate of collateral damage would be acceptable. If we use the signature of the most commonly used browser, the collateral damage of blocking it would be unacceptable.
### Why not use Go, Node, etc. for performance?
Any language can be used for a high-performance architecture, but not every architecture delivers high performance.
Go, Node, etc. make it easy to implement a 1:1 connection proxy model, i.e. creating one upstream connection for every user connection. Under this model the performance goal is lower overhead in setting up each upstream connection. Toward that goal people start to reinvent their own 0-RTT cryptographic protocols (badly) as TLS goes out of the window, because it either takes several round trips in handshakes or makes it [a pain to set up 0-RTT properly](https://tools.ietf.org/html/rfc8446#section-8). Then people also start to look at low-level optimizations such as TCP Fast Open.
Meanwhile, Google has removed the code for TCP Fast Open in Chromium altogether (they [authored](https://tools.ietf.org/html/rfc7413) the RFC of TCP Fast Open in 2014). The literal reason given for this reversal was
> We never enabled it by default, and have no plans to, so we should just remove it. QUIC also makes it less useful, and TLS 1.2 0-RTT session restore means it potentially mutates state.
And the real reason Google never enabled TCP Fast Open by default is that it was dragged down by middleboxes and [never really worked](https://blog.donatas.net/blog/2017/03/09/tfo/). In the Linux kernel there is a sysctl called `tcp_fastopen_blackhole_timeout_sec`: whenever a SYN packet is dropped, TCP Fast Open is blackholed for this much time, starting at one hour and increasing exponentially, rendering it practically useless. Today TCP Fast Open accounts for [0.1% of the Internet traffic](https://ieeexplore.ieee.org/document/8303960/), so using it actually makes you highly detectable!
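The blackholing behavior can be observed on a Linux host (a sketch; the default value varies by kernel version):
```sh
# Read the current TCP Fast Open blackhole timeout, in seconds.
sysctl net.ipv4.tcp_fastopen_blackhole_timeout_sec
```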
It was obvious to Google then and is obvious to us now that the road to zero latency at the cost of compromising security and interoperability is a dead end under the 1:1 connection model, which is why Google pursued connection persistence and 1:N connection multiplexing in HTTP/2 and a more radical overhaul of HTTP/TLS/TCP in QUIC. In a 1:N connection model, the cost of setting up the first connection is amortized, the following connections cost nothing to set up without any security or stability compromises, and the race to zero connection latency becomes irrelevant.
Complex, battle-tested logic for connection management was [implemented](https://web.archive.org/web/20161222115511/https://insouciant.org/tech/connection-management-in-chromium/) in Chromium. The same thing is not so easy to do again from scratch with the aforementioned languages.
### Why not reinvent cryptos?
Because the first rule of cryptography is: [Don't roll your](http://loup-vaillant.fr/articles/rolling-your-own-crypto) [own cryptos](https://security.stackexchange.com/questions/18197/why-shouldnt-we-roll-our-own).
If you do roll your own cryptos, see what [happened](https://groups.google.com/d/msg/traffic-obf/CWO0peBJLGc/Py-clLSTBwAJ) with Shadowsocks. (Spoiler: it encrypts, but doesn't authenticate, leading to active probing exploits, and more exploits after duct-tape fixes.)
### Why not use HTTP/2 proxy from browser directly?
You may have wondered: if NaïveProxy reuses Chrome's network stack, why not use Chrome directly? The answer is that you can. You will get 80% of what NaïveProxy does (TLS, connection multiplexing, application fronting) without NaïveProxy, and that 80% is also what makes NaïveProxy indistinguishable from normal traffic. Simply point your browser to Caddy as an HTTP/2 or HTTP/3 forward proxy directly.
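For example, Chromium itself can be started with such a frontend as its proxy (a sketch; the flag is standard Chromium, the host is illustrative):
```sh
# Start Chromium with the Caddy frontend as an HTTPS forward proxy.
chromium --proxy-server=https://example.com
```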
But this setup is prone to basic traffic analysis due to lack of obfuscation and predictable packet sizes in TLS handshakes. [The bane of "TLS-in-TLS" tunnels](http://blog.zorinaq.com/my-experience-with-the-great-firewall-of-china/) is that this combination is just so different from any normal protocols (nobody does 3-way handshakes twice in a row) and the record sizes of TLS handshakes are so predictable that no machine learning is needed to [detect it](https://github.com/shadowsocks/shadowsocks-org/issues/86#issuecomment-362809854).
The browser introduces an extra 1RTT delay during proxied connection setup because of its interpretation of HTTP RFCs: it waits for a 200 response after a CONNECT request, incurring unnecessary latency. NaïveProxy does HTTP Fast CONNECT, similar to TCP Fast Open, i.e. it sends subsequent data immediately after CONNECT without this 1RTT delay. Also, you may have to type in the password for the proxy every time you open the browser; NaïveProxy sends the password automatically.
Thus, traffic obfuscation, HTTP Fast CONNECT, and auto-authentication are the crucial last 20% provided by NaïveProxy. These can't really be achieved inside Chrome as extensions/apps because they don't have access to sockets. Instead, NaïveProxy extracts Chromium's network stack without all the other baggage to build a small binary (4% of a full Chrome build).
But if you don't need the best performance, and unobfuscated TLS-in-TLS somehow still works for you, you can just keep using Caddy proxy with your browser.
### Why no "CDN support"?
Take Cloudflare for example. https://www.cloudflare.com/terms/ says: "Use of the Service for serving video (unless purchased separately as a Paid Service) or a disproportionate percentage of pictures, audio files, or other non-HTML content, is prohibited." Proxying traffic is definitely prohibited by the terms in this context.

95
USAGE.txt Normal file

@ -0,0 +1,95 @@
Usage: naive --listen=... --proxy=...
naive [/path/to/config.json]
Description:
naive is a proxy that transports traffic in Chromium's pattern.
It works as a proxy client, a proxy server, or both together.
Options in the form of `naive --listen=... --proxy=...` can also be
specified using a JSON file:
{
"listen": "...",
"proxy": "..."
}
Uses "config.json" by default if run without arguments.
Options:
-h, --help
Shows help message.
--version
Prints version.
--listen=<proto>://[addr][:port]
--listen=socks://[[user]:[pass]@][addr][:port]
Listens at addr:port with protocol <proto>.
Available proto: socks, http, redir.
Default proto, addr, port: socks, 0.0.0.0, 1080.
* http: Supports only proxying https:// URLs, no http://.
* redir: Works with certain iptables setup.
(Redirecting locally originated traffic)
iptables -t nat -A OUTPUT -d $proxy_server_ip -j RETURN
iptables -t nat -A OUTPUT -p tcp -j REDIRECT --to-ports 1080
(Redirecting forwarded traffic on a router)
iptables -t nat -A PREROUTING -p tcp -j REDIRECT --to-ports 1080
Also activates a DNS resolver on the same UDP port. Similar iptables
rules can redirect DNS queries to this resolver. The resolver returns
artificial addresses that are translated back to the original domain
names in proxy requests and then resolved remotely.
The artificial results are not saved for privacy, so restarting the
resolver may cause downstream to cache stale results.
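(One possible rule for redirecting DNS queries to the resolver on a router;
assumes the default port 1080)
iptables -t nat -A PREROUTING -p udp --dport 53 -j REDIRECT --to-ports 1080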
--proxy=<proto>://<user>:<pass>@<hostname>[:<port>]
Routes traffic via the proxy server. Connects directly by default.
Available proto: https, quic. Infers port by default.
--insecure-concurrency=<N>
Use N concurrent tunnel connections to be more robust under bad network
conditions. More connections make the tunneling easier to detect and less
secure. This project strives for the strongest security against traffic
analysis. Using it in an insecure way defeats its purpose.
If you must use this, try N=2 first to see if it solves your issues.
Strongly recommend against using more than 4 connections here.
--extra-headers=...
Appends extra headers in requests to the proxy server.
Multiple headers are separated by CRLF.
--host-resolver-rules="MAP proxy.example.com 1.2.3.4"
Statically resolves a domain name to an IP address.
--resolver-range=CIDR
Uses this range in the builtin resolver. Default: 100.64.0.0/10.
--log=[<path>]
Saves log to the file at <path>. If path is empty, prints to
console. No log is saved or printed by default for privacy.
--log-net-log=<path>
Saves NetLog. View at https://netlog-viewer.appspot.com/.
--ssl-key-log-file=<path>
Saves SSL keys for Wireshark inspection.


@ -53,6 +53,7 @@ group("gn_all") {
deps = [
":gn_visibility",
"//net",
"//components/cronet",
]
}

87
src/build.sh Executable file

@ -0,0 +1,87 @@
#!/bin/sh
set -e
export TMPDIR="$PWD/tmp"
rm -rf "$TMPDIR"
mkdir -p "$TMPDIR"
if [ "$1" = debug ]; then
out=out/Debug
flags="
is_debug=true
is_component_build=true"
else
out=out/Release
flags="
is_official_build=true
exclude_unwind_tables=true
enable_resource_allowlist_generation=false
symbol_level=0"
fi
. ./get-sysroot.sh
if [ "$CCACHE" ]; then
flags="$flags
cc_wrapper=\"$CCACHE\""
fi
flags="$flags"'
is_clang=true
use_sysroot=false
fatal_linker_warnings=false
treat_warnings_as_errors=false
enable_base_tracing=false
use_udev=false
use_aura=false
use_ozone=false
use_gio=false
use_gtk=false
use_platform_icu_alternatives=true
use_glib=false
disable_file_support=true
enable_websockets=false
use_kerberos=false
enable_mdns=false
enable_reporting=false
include_transport_security_state_preload_list=false
use_nss_certs=false
'
if [ "$WITH_SYSROOT" ]; then
flags="$flags
target_sysroot=\"//$WITH_SYSROOT\""
fi
if [ "$USE_AFDO" ]; then
flags="$flags"'
clang_sample_profile_path="//chrome/android/profiles/afdo.prof"'
fi
if [ "$ARCH" = "Darwin" ]; then
flags="$flags"'
enable_dsyms=false'
fi
if [ "$target_cpu" = "mipsel" -o "$target_cpu" = "mips64el" ]; then
flags="$flags"'
use_thin_lto=false
chrome_pgo_phase=0'
fi
rm -rf "./$out"
mkdir -p out
export DEPOT_TOOLS_WIN_TOOLCHAIN=0
./gn/out/gn gen "$out" --args="$flags $EXTRA_FLAGS" --script-executable=$PYTHON
ninja -C "$out" naive
if echo "$EXTRA_FLAGS" | grep -vq "build_static=true"; then
ninja -C "$out" cronet cronet_static
./make-cronet-cgo-sdk.sh
fi


@ -3,6 +3,7 @@
# found in the LICENSE file.
import("//build/buildflag_header.gni")
import("//build/config/c++/c++.gni")
import("//build/toolchain/toolchain.gni")
import("//build/util/lastchange.gni")
import("//build/util/process_version.gni")
@ -76,7 +77,7 @@ source_set("metrics_util") {
# For platforms on which the native Cronet library is used, build the library,
# a cronet_tests binary that exercises it, and a unit-tests binary.
# Android and iOS have their own platform-specific rules to build Cronet.
if (is_android) {
if (false) {
group("cronet_package") {
testonly = true
deps = [ "//components/cronet/android:cronet_package_android" ]
@ -86,6 +87,116 @@ if (is_android) {
deps = [ "//components/cronet/ios:cronet_package_ios" ]
}
} else {
static_library("cronet_static") {
deps = [
"//base",
"//components/cronet:cronet_common",
"//components/cronet/native:cronet_native_impl",
"//net",
]
if (use_custom_libcxx) {
deps += [
# Add shared_library_deps to include custom libc++ into dependencies.
# They are by default only added to executable(), loadable_module(), and
# shared_library() targets, but cronet_static_complete library needs it as well to
# avoid linking with different versions of libc++.
"//build/config:shared_library_deps",
]
}
sources = [ "cronet_global_state_stubs.cc" ]
complete_static_lib = true
configs -= [ "//build/config/compiler:thin_archive" ]
}
executable("cronet_example") {
testonly = true
sources = [ "native/sample/bidi_example.cc" ]
deps = [
"//components/cronet",
"//components/cronet/native:cronet_native_headers",
]
if ((is_linux || is_chromeos) && !is_component_build) {
public_configs = [ "//build/config/gcc:rpath_for_built_shared_libraries" ]
}
}
executable("cronet_example_external") {
testonly = true
no_default_deps = true
sources = [ "native/sample/bidi_example.cc" ]
deps = [ ":cronet" ]
configs -= [
"//build/config:feature_flags",
"//build/config/compiler:afdo",
"//build/config/compiler:afdo_optimize_size",
"//build/config/compiler:cet_shadow_stack",
"//build/config/compiler:chromium_code",
"//build/config/compiler:default_include_dirs",
"//build/config/compiler:default_init_stack_vars",
"//build/config/compiler:default_optimization",
"//build/config/compiler:default_stack_frames",
"//build/config/compiler:default_symbols",
"//build/config/compiler:no_exceptions",
"//build/config/compiler:no_rtti",
"//build/config/compiler:prevent_unsafe_narrowing",
"//build/config/compiler:runtime_library",
"//build/config/compiler:thin_archive",
"//build/config/compiler/pgo:default_pgo_flags",
"//build/config/coverage:default_coverage",
"//build/config/sanitizers:default_sanitizer_flags",
"//build/config/clang:find_bad_constructs",
"//build/config/clang:extra_warnings",
]
configs += [ "//components/cronet/native:cronet_native_include_config" ]
if (is_linux) {
configs += [ "//build/config/linux:runtime_library" ]
} else if (is_mac) {
configs += [ "//build/config/mac:runtime_library" ]
} else if (is_android) {
configs += [ "//build/config/android:runtime_library" ]
}
}
executable("cronet_example_external_static") {
testonly = true
no_default_deps = true
sources = [ "native/sample/bidi_example.cc" ]
deps = [ ":cronet_static" ]
configs -= [
"//build/config:feature_flags",
"//build/config/compiler:afdo",
"//build/config/compiler:afdo_optimize_size",
"//build/config/compiler:cet_shadow_stack",
"//build/config/compiler:chromium_code",
"//build/config/compiler:default_include_dirs",
"//build/config/compiler:default_init_stack_vars",
"//build/config/compiler:default_optimization",
"//build/config/compiler:default_stack_frames",
"//build/config/compiler:default_symbols",
"//build/config/compiler:no_exceptions",
"//build/config/compiler:no_rtti",
"//build/config/compiler:prevent_unsafe_narrowing",
"//build/config/compiler:runtime_library",
"//build/config/compiler:thin_archive",
"//build/config/compiler/pgo:default_pgo_flags",
"//build/config/coverage:default_coverage",
"//build/config/sanitizers:default_sanitizer_flags",
"//build/config/clang:find_bad_constructs",
"//build/config/clang:extra_warnings",
]
configs += [ "//components/cronet/native:cronet_native_include_config" ]
if (is_linux) {
configs += [ "//build/config/linux:runtime_library" ]
} else if (is_mac) {
configs += [ "//build/config/mac:runtime_library" ]
} else if (is_android) {
configs += [ "//build/config/android:runtime_library" ]
}
}
config("shared_library_public_config") {
if (is_mac && !is_component_build) {
# Executable targets that depend on the shared libraries below need to have
@ -94,7 +205,7 @@ if (is_android) {
}
}
_cronet_shared_lib_name = "cronet.$chrome_version_full"
_cronet_shared_lib_name = "cronet"
_cronet_shared_lib_file_name =
"$shlib_prefix$_cronet_shared_lib_name$shlib_extension"


@ -146,7 +146,8 @@ void PostTaskToInitThread(const base::Location& posted_from,
g_init_task_executor->task_runner()->PostTask(posted_from, std::move(task));
}
void EnsureInitialized() {
void EnsureInitialized(const char* /*enable_features*/,
const char* /*disable_features*/) {
if (g_init_task_executor) {
// Ensure that init is done on the init thread.
g_init_thread_init_done.Wait();


@ -12,6 +12,7 @@
#include <map>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include "base/base64.h"
@ -53,7 +54,10 @@
#include "net/log/net_log_util.h"
#include "net/net_buildflags.h"
#include "net/nqe/network_quality_estimator_params.h"
#include "net/proxy_resolution/configured_proxy_resolution_service.h"
#include "net/proxy_resolution/proxy_config.h"
#include "net/proxy_resolution/proxy_config_service_fixed.h"
#include "net/proxy_resolution/proxy_config_with_annotation.h"
#include "net/proxy_resolution/proxy_resolution_service.h"
#include "net/third_party/quiche/src/quic/core/quic_versions.h"
#include "net/url_request/url_request_context.h"
@ -352,9 +356,25 @@ CronetContext::NetworkTasks::BuildDefaultURLRequestContext(
net::URLRequestContextBuilder context_builder;
SetSharedURLRequestContextBuilderConfig(&context_builder);
context_builder.set_proxy_resolution_service(
cronet::CreateProxyResolutionService(std::move(proxy_config_service),
g_net_log.Get().net_log()));
const auto proxy_server_it =
context_config_->effective_experimental_options.find("proxy_server");
std::string proxy_server_str = "direct://";
if (proxy_server_it !=
context_config_->effective_experimental_options.end()) {
const base::Value& value = proxy_server_it->second;
if (value.is_string()) {
proxy_server_str = value.GetString();
}
}
net::ProxyConfig proxy_config;
proxy_config.proxy_rules().ParseFromString(proxy_server_str);
auto proxy_service =
net::ConfiguredProxyResolutionService::CreateWithoutProxyResolver(
std::make_unique<net::ProxyConfigServiceFixed>(
net::ProxyConfigWithAnnotation(proxy_config,
MISSING_TRAFFIC_ANNOTATION)),
g_net_log.Get().net_log());
context_builder.set_proxy_resolution_service(std::move(proxy_service));
if (context_config_->enable_network_quality_estimator) {
std::unique_ptr<net::NetworkQualityEstimatorParams> nqe_params =


@ -31,7 +31,8 @@ void PostTaskToInitThread(const base::Location& posted_from,
// or binding to an existing thread, to run initialization and process
// network notifications on. The implementation must be thread-safe and
// idempotent, and must complete initialization before returning.
void EnsureInitialized();
void EnsureInitialized(const char* enable_features = nullptr,
const char* disable_features = nullptr);
// Creates a proxy config service appropriate for this platform that fetches the
// system proxy settings. Cronet will call this API only after a prior call


@ -22,14 +22,18 @@ namespace cronet {
namespace {
scoped_refptr<base::SingleThreadTaskRunner> InitializeAndCreateTaskRunner() {
scoped_refptr<base::SingleThreadTaskRunner> InitializeAndCreateTaskRunner(
const char* enable_features,
const char* disable_features) {
// Cronet tests sets AtExitManager as part of TestSuite, so statically linked
// library is not allowed to set its own.
#if !defined(CRONET_TESTS_IMPLEMENTATION)
std::ignore = new base::AtExitManager;
#endif
base::FeatureList::InitializeInstance(std::string(), std::string());
base::FeatureList::InitializeInstance(
enable_features ? enable_features : "",
disable_features ? disable_features : "");
// Note that in component builds this ThreadPoolInstance will be shared with
// the calling process, if it also depends on //base. In particular this means
@ -40,16 +44,19 @@ scoped_refptr<base::SingleThreadTaskRunner> InitializeAndCreateTaskRunner() {
return base::ThreadPool::CreateSingleThreadTaskRunner({});
}
base::SingleThreadTaskRunner* InitTaskRunner() {
base::SingleThreadTaskRunner* InitTaskRunner(
const char* enable_features = nullptr,
const char* disable_features = nullptr) {
static scoped_refptr<base::SingleThreadTaskRunner> init_task_runner =
InitializeAndCreateTaskRunner();
InitializeAndCreateTaskRunner(enable_features, disable_features);
return init_task_runner.get();
}
} // namespace
void EnsureInitialized() {
std::ignore = InitTaskRunner();
void EnsureInitialized(const char* enable_features,
const char* disable_features) {
std::ignore = InitTaskRunner(enable_features, disable_features);
}
bool OnInitThread() {
@ -63,15 +70,13 @@ void PostTaskToInitThread(const base::Location& posted_from,
std::unique_ptr<net::ProxyConfigService> CreateProxyConfigService(
const scoped_refptr<base::SequencedTaskRunner>& io_task_runner) {
return net::ConfiguredProxyResolutionService::CreateSystemProxyConfigService(
io_task_runner);
return nullptr;
}
std::unique_ptr<net::ProxyResolutionService> CreateProxyResolutionService(
std::unique_ptr<net::ProxyConfigService> proxy_config_service,
net::NetLog* net_log) {
return net::ConfiguredProxyResolutionService::CreateUsingSystemProxyResolver(
std::move(proxy_config_service), net_log, /*quick_check_enabled=*/true);
return nullptr;
}
std::string CreateDefaultUserAgent(const std::string& partial_user_agent) {


@ -7,6 +7,7 @@
#include <unordered_set>
#include <utility>
#include "base/base_switches.h"
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/files/file_path.h"
@ -102,6 +103,25 @@ Cronet_EngineImpl::~Cronet_EngineImpl() {
Cronet_RESULT Cronet_EngineImpl::StartWithParams(
Cronet_EngineParamsPtr params) {
absl::optional<base::Value::DictStorage> experimental_options =
URLRequestContextConfig::ParseExperimentalOptions(
params->experimental_options);
if (experimental_options) {
const auto& iter = experimental_options->find("feature_list");
if (iter != experimental_options->end()) {
const base::Value& feature_list = iter->second;
if (feature_list.is_dict()) {
const std::string* enable_features =
feature_list.GetDict().FindString(switches::kEnableFeatures);
const std::string* disable_features =
feature_list.GetDict().FindString(switches::kDisableFeatures);
cronet::EnsureInitialized(
enable_features ? enable_features->c_str() : nullptr,
disable_features ? disable_features->c_str() : nullptr);
}
}
}
cronet::EnsureInitialized();
base::AutoLock lock(lock_);


@ -0,0 +1,153 @@
#!/bin/sh
set -ex
WINDOWS_LLVM_VERSION=13.0.1
MAC_SDK_VERSION=12.1
# CGO does not support relative path very well. TODO: better way to handle this using SRCDIR?
# for i in go_env.sh link_shared.go link_static.go; do
# sed "s#\./sysroot#$PWD/sysroot#g" $i >$i.1
# mv $i.1 $i
# done
# Imports environment variables: GOOS, GOARCH, GOMIPS, CGO_CFLAGS, CGO_LDFLAGS
# Imports bash variables: ARCH, target_cpu, CLANG_REVISION, WITH_CLANG, WITH_QEMU, WITH_ANDROID_IMG, buildmode_flag
. ./go_env.sh
if [ "$ARCH" = 'Windows' ]; then
alias ln='MSYS=winsymlinks:nativestrict ln'
exe_extension=.exe
fi
# Gets LLVM
# Recommends copying cached content to here to avoid repeatedly downloading from googleapis.com.
if [ ! -d ./llvm/bin ]; then
if [ -d ../../../third_party/llvm-build/Release+Asserts/bin ]; then
# Reuses existing toolchain if running in naiveproxy build tree
ln -sfn $PWD/../../../third_party/llvm-build/Release+Asserts ./llvm
else
mkdir -p ./llvm
clang_path="clang-$CLANG_REVISION.tgz"
clang_url="https://commondatastorage.googleapis.com/chromium-browser-clang/$WITH_CLANG/$clang_path"
curl "$clang_url" | tar xzf - -C ./llvm
fi
fi
if [ "$ARCH" = 'Windows' ]; then
ln -sfn "C:/Program Files/LLVM/lib/clang/$WINDOWS_LLVM_VERSION" ./llvm/lib/clang/
cat >lld-link.cc <<EOF
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <string>
int main(int argc, char** argv) {
std::string cmd = "lld-link-old";
for (int i = 1; i < argc; ++i) {
fprintf(stderr, "argv[%d]: %s\n", i, argv[i]);
if (strcmp(argv[i], "--tsaware") == 0) continue;
if (strcmp(argv[i], "--nxcompat") == 0) continue;
if (strstr(argv[i], "--major-os-version=") == argv[i]) continue;
if (strstr(argv[i], "--minor-os-version=") == argv[i]) continue;
if (strstr(argv[i], "--major-subsystem-version=") == argv[i]) continue;
if (strstr(argv[i], "--minor-subsystem-version=") == argv[i]) continue;
if (strcmp(argv[i], "--dynamicbase") == 0) continue;
if (strcmp(argv[i], "--high-entropy-va") == 0) continue;
if (strcmp(argv[i], "-T") == 0) {
++i;
continue;
}
if (strcmp(argv[i], "--start-group") == 0) continue;
if (strcmp(argv[i], "mingwex.lib") == 0) continue;
if (strcmp(argv[i], "mingw32.lib") == 0) continue;
if (strcmp(argv[i], "--end-group") == 0) continue;
if (strchr(argv[i], ' ') != nullptr) {
cmd.append(" \"").append(argv[i]).append("\"");
} else {
cmd.append(" ").append(argv[i]);
}
}
fprintf(stderr, "cmd: %s\n", cmd.c_str());
return system(cmd.c_str());
}
EOF
if [ ! -f ./llvm/bin/lld-link-old.exe ]; then
cp 'C:\Program Files\LLVM\bin\clang.exe' ./llvm/bin/
mv ./llvm/bin/lld-link.exe ./llvm/bin/lld-link-old.exe
clang lld-link.cc -o ./llvm/bin/lld-link.exe
fi
fi
# Finds Mac SDK path for sysroot, following build/mac/find_sdk.py.
if [ "$ARCH" = 'Darwin' ]; then
mac_sdk_path="$(xcode-select -print-path)"/Platforms/MacOSX.platform/Developer/SDKs/MacOSX$MAC_SDK_VERSION.sdk
if [ ! -e "$mac_sdk_path" ]; then
echo 'MacOS SDK not found'
exit 1
fi
ln -sfn "$mac_sdk_path" ./sysroot
fi
export PATH="$PWD/llvm/bin:$PATH"
export CC=clang
export CGO_ENABLED=1
export CGO_LDFLAGS_ALLOW=.*
run_cronet_example() {
local rootfs=./sysroot
if [ "$WITH_ANDROID_IMG" ]; then
# Only supports testing in tree because the rootfs is very large and the script to download it is not trivial.
rootfs="$PWD/../../../out/sysroot-build/android/$WITH_ANDROID_IMG"
if [ ! -d "$rootfs" ]; then
echo 'Skips testing cronet_example due to missing Android rootfs'
return
fi
fi
if [ "$WITH_QEMU" ]; then
if [ "$target_cpu" = "x64" -o "$target_cpu" = "x86" ]; then
cp libcronet.so cronet_example "$rootfs"
bwrap --bind "$rootfs" / --proc /proc --dev /dev --setenv LD_LIBRARY_PATH / /cronet_example "$@"
rm -f "$rootfs"/libcronet.so "$rootfs"/cronet_example
else
# Older qemu-user cannot run CGO binaries in MIPS, see https://github.com/golang/go/issues/33746.
# Newer qemu-user-static can be separately installed.
qemu-$WITH_QEMU-static -L "$rootfs" ./cronet_example "$@"
fi
elif [ "$target_cpu" = "arm64" -a "$ARCH" = "Darwin" ]; then
echo 'Skips testing cronet_example'
elif [ "$target_cpu" = "arm64" -a "$ARCH" = "Windows" ]; then
echo 'Skips testing cronet_example'
else
./cronet_example "$@"
fi
}
go build $buildmode_flag cronet_example.go link_shared.go
run_cronet_example http://example.com
if [ "$ARCH" = "Linux" ]; then
./llvm/bin/llvm-strip cronet_example
fi
ls -l cronet_example${exe_extension}
rm -f cronet_example${exe_extension}
go build $buildmode_flag cronet_example.go link_static.go
run_cronet_example http://example.com
if [ "$ARCH" = "Linux" ]; then
./llvm/bin/llvm-strip cronet_example
fi
ls -l cronet_example${exe_extension}
rm -f cronet_example${exe_extension}
# If it is a symlink, it points to pre-installed toolchain. Avoids packaging it.
if [ -h ./sysroot ]; then
rm -f ./sysroot
fi
# If it is a symlink, it points to pre-installed toolchain. Avoids packaging it.
if [ -h ./llvm ]; then
rm -f ./llvm
fi
# Generated by ThinLTO linking.
rm -rf ./thinlto-cache

View File

@ -0,0 +1,155 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>
#include "bidirectional_stream_c.h"
#include "cronet_c.h"
class BidirectionalStreamCallback {
public:
bidirectional_stream* stream = nullptr;
char read_buffer[10240];
std::atomic<bool> done = false;
bidirectional_stream_callback* callback() const { return &s_callback; }
private:
static BidirectionalStreamCallback* FromStream(bidirectional_stream* stream) {
return reinterpret_cast<BidirectionalStreamCallback*>(stream->annotation);
}
// C callbacks.
static void on_stream_ready_callback(bidirectional_stream* stream) {
puts("on_stream_ready_callback");
}
static void on_response_headers_received_callback(
bidirectional_stream* stream,
const bidirectional_stream_header_array* headers,
const char* negotiated_protocol) {
printf("on_response_headers_received_callback negotiated_protocol=%s\n",
negotiated_protocol);
BidirectionalStreamCallback* self = FromStream(stream);
for (size_t i = 0; i < headers->count; ++i) {
if (headers->headers[i].key[0] == '\0')
continue;
printf("%s: %s\n", headers->headers[i].key, headers->headers[i].value);
}
bidirectional_stream_read(stream, self->read_buffer,
sizeof(self->read_buffer));
}
static void on_read_completed_callback(bidirectional_stream* stream,
char* data,
int count) {
printf("on_read_completed_callback %d\n", count);
BidirectionalStreamCallback* self = FromStream(stream);
if (count == 0)
return;
fwrite(data, 1, count, stdout);
puts("");
bidirectional_stream_read(stream, self->read_buffer,
sizeof(self->read_buffer));
}
static void on_write_completed_callback(bidirectional_stream* stream,
const char* data) {
puts("on_write_completed_callback");
}
static void on_response_trailers_received_callback(
bidirectional_stream* stream,
const bidirectional_stream_header_array* trailers) {
puts("on_response_trailers_received_callback");
for (size_t i = 0; i < trailers->count; ++i) {
printf("%s: %s\n", trailers->headers[i].key, trailers->headers[i].value);
}
}
static void on_succeded_callback(bidirectional_stream* stream) {
puts("on_succeded_callback");
BidirectionalStreamCallback* self = FromStream(stream);
self->done = true;
}
static void on_failed_callback(bidirectional_stream* stream, int net_error) {
printf("on_failed_callback %d\n", net_error);
BidirectionalStreamCallback* self = FromStream(stream);
self->done = true;
}
static void on_canceled_callback(bidirectional_stream* stream) {
puts("on_canceled_callback");
BidirectionalStreamCallback* self = FromStream(stream);
self->done = true;
}
static bidirectional_stream_callback s_callback;
};
bidirectional_stream_callback BidirectionalStreamCallback::s_callback = {
on_stream_ready_callback,
on_response_headers_received_callback,
on_read_completed_callback,
on_write_completed_callback,
on_response_trailers_received_callback,
on_succeded_callback,
on_failed_callback,
on_canceled_callback,
};
Cronet_EnginePtr CreateCronetEngine() {
Cronet_EnginePtr cronet_engine = Cronet_Engine_Create();
Cronet_EngineParamsPtr engine_params = Cronet_EngineParams_Create();
Cronet_EngineParams_user_agent_set(engine_params, "Cronet");
Cronet_EngineParams_experimental_options_set(engine_params, R"({
"ssl_key_log_file": "/tmp/keys",
"feature_list": {
"enable-features": "PartitionConnectionsByNetworkIsolationKey"
},
"proxy_server": "socks5://127.0.0.1:1080"
})");
Cronet_Engine_StartWithParams(cronet_engine, engine_params);
Cronet_EngineParams_Destroy(engine_params);
return cronet_engine;
}
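// The experimental options above exercise the new plumbing in this change:
// "feature_list" enables PartitionConnectionsByNetworkIsolationKey (applied in
// Cronet_EngineImpl::StartWithParams) and "proxy_server" forces a fixed proxy
// (applied in CronetContext::NetworkTasks::BuildDefaultURLRequestContext);
// "ssl_key_log_file" is a pre-existing debugging option that writes TLS
// secrets to the given file.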
int main(int argc, const char* argv[]) {
if (argc != 2) {
printf("Usage: %s url\n", argv[0]);
return 1;
}
const char* url = argv[1];
Cronet_EnginePtr cronet_engine = CreateCronetEngine();
stream_engine* cronet_stream_engine =
Cronet_Engine_GetStreamEngine(cronet_engine);
Cronet_Engine_StartNetLogToFile(cronet_engine, "/tmp/log.json", true);
BidirectionalStreamCallback stream_callback;
stream_callback.stream = bidirectional_stream_create(
cronet_stream_engine, &stream_callback, stream_callback.callback());
bidirectional_stream_header headers[] = {
{"-network-isolation-key", "http://a"},
};
const bidirectional_stream_header_array headers_array = {1, 1, headers};
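// The value of -network-isolation-key must parse to a non-opaque SchemefulSite
// (i.e. a valid URL), otherwise bidirectional_stream_start reports an error.
// The header itself is stripped before the request goes on the wire.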
if (bidirectional_stream_start(stream_callback.stream, url, 0, "GET",
&headers_array, true) < 0) {
stream_callback.done = true;
}
puts("bidirectional_stream_start");
while (!stream_callback.done) {
std::this_thread::sleep_for(std::chrono::milliseconds(100));
}
bidirectional_stream_destroy(stream_callback.stream);
Cronet_Engine_StopNetLog(cronet_engine);
Cronet_Engine_Shutdown(cronet_engine);
Cronet_Engine_Destroy(cronet_engine);
return 0;
}
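// A minimal sketch (not exercised by this example) of how the otherwise-idle
// on_write_completed_callback would come into play, assuming the
// bidirectional_stream_write() declaration from bidirectional_stream_c.h:
// start the stream with a body-bearing method and end_of_stream=false, e.g.
//   bidirectional_stream_start(stream, url, 0, "POST", &headers_array, false);
// then send the body once the stream is ready:
//   static void on_stream_ready_callback(bidirectional_stream* stream) {
//     static const char kBody[] = "hello";
//     // true: this is the last chunk of the request body.
//     bidirectional_stream_write(stream, kBody, sizeof(kBody) - 1, true);
//   }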

View File

@ -0,0 +1,133 @@
package main
// #include <stdbool.h>
// #include <stdlib.h>
// #include "cronet_c.h"
// #include "bidirectional_stream_c.h"
// extern void _on_stream_ready(bidirectional_stream* stream);
// extern void _on_response_headers_received(bidirectional_stream* stream, bidirectional_stream_header_array* headers, char* negotiated_protocol);
// extern void _on_read_completed(bidirectional_stream* stream, char* data, int bytes_read);
// extern void _on_write_completed(bidirectional_stream* stream, char* data);
// extern void _on_response_trailers_received(bidirectional_stream* stream, bidirectional_stream_header_array* trailers);
// extern void _on_succeded(bidirectional_stream* stream);
// extern void _on_failed(bidirectional_stream* stream, int net_error);
// extern void _on_canceled(bidirectional_stream* stream);
import "C"
import (
"log"
"os"
"strconv"
"sync"
"unsafe"
)
var wait sync.WaitGroup
var readBuffer unsafe.Pointer
func main() {
cronetEngine := C.Cronet_Engine_Create()
engineParams := C.Cronet_EngineParams_Create()
userAgentC := C.CString("Cronet")
C.Cronet_EngineParams_user_agent_set(engineParams, userAgentC)
C.Cronet_Engine_StartWithParams(cronetEngine, engineParams)
C.free(unsafe.Pointer(userAgentC))
C.Cronet_EngineParams_Destroy(engineParams)
streamEngine := C.Cronet_Engine_GetStreamEngine(cronetEngine)
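// The C callback table is populated with the Go functions exported below; cgo
// only allows assigning them through the opaque (*[0]byte) function-pointer type.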
var callback C.bidirectional_stream_callback
callback.on_stream_ready = (*[0]byte)(C._on_stream_ready)
callback.on_response_headers_received = (*[0]byte)(C._on_response_headers_received)
callback.on_read_completed = (*[0]byte)(C._on_read_completed)
callback.on_write_completed = (*[0]byte)(C._on_write_completed)
callback.on_response_trailers_received = (*[0]byte)(C._on_response_trailers_received)
callback.on_succeded = (*[0]byte)(C._on_succeded)
callback.on_failed = (*[0]byte)(C._on_failed)
callback.on_canceled = (*[0]byte)(C._on_canceled)
stream := C.bidirectional_stream_create(streamEngine, nil, &callback)
url := C.CString(os.Args[1])
defer C.free(unsafe.Pointer(url))
method := C.CString("GET")
defer C.free(unsafe.Pointer(method))
readBuffer = C.malloc(32768)
wait.Add(1)
C.bidirectional_stream_start(stream, url, 0, method, nil, true)
wait.Wait()
C.free(readBuffer)
C.bidirectional_stream_destroy(stream)
C.Cronet_Engine_Shutdown(cronetEngine)
C.Cronet_Engine_Destroy(cronetEngine)
}
//export _on_stream_ready
func _on_stream_ready(stream *C.bidirectional_stream) {
log.Println("on_stream_ready_callback")
}
//export _on_response_headers_received
func _on_response_headers_received(stream *C.bidirectional_stream, headers *C.bidirectional_stream_header_array, negotiated_protocol *C.char) {
log.Println("on_response_headers_received, negotiated_protocol=", C.GoString(negotiated_protocol))
var hdrP *C.bidirectional_stream_header
hdrP = headers.headers
headersSlice := unsafe.Slice(hdrP, int(headers.count))
for _, header := range headersSlice {
key := C.GoString(header.key)
if len(key) == 0 {
continue
}
value := C.GoString(header.value)
log.Println(key + ": " + value)
}
C.bidirectional_stream_read(stream, (*C.char)(readBuffer), 32768)
}
//export _on_read_completed
func _on_read_completed(stream *C.bidirectional_stream, data *C.char, bytesRead C.int) {
log.Println("on_read_completed")
dataSlice := C.GoBytes(readBuffer, bytesRead)
log.Println(string(dataSlice))
C.bidirectional_stream_read(stream, (*C.char)(readBuffer), 32768)
}
//export _on_write_completed
func _on_write_completed(stream *C.bidirectional_stream, data *C.char) {
log.Println("on_write_completed")
}
//export _on_response_trailers_received
func _on_response_trailers_received(stream *C.bidirectional_stream, trailers *C.bidirectional_stream_header_array) {
log.Println("on_response_trailers_received")
}
//export _on_succeded
func _on_succeded(stream *C.bidirectional_stream) {
log.Println("on_succeded")
wait.Done()
}
//export _on_failed
func _on_failed(stream *C.bidirectional_stream, net_error C.int) {
log.Println("on_failed")
log.Println("net error ", strconv.Itoa(int(net_error)))
wait.Done()
}
//export _on_canceled
func _on_canceled(stream *C.bidirectional_stream) {
log.Println("on_canceled")
wait.Done()
}

View File

@ -758,6 +758,23 @@ void URLRequestContextConfig::SetContextBuilderExperimentalOptions(
continue;
}
session_params->spdy_go_away_on_ip_change = iter.second.GetBool();
} else if (iter.first == "proxy_server") {
if (!iter.second.is_string()) {
LOG(ERROR) << "\"" << iter.first << "\" config params \"" << iter.second
<< "\" is not a string";
effective_experimental_options.erase(iter.first);
continue;
}
// Handled in CronetContext::NetworkTasks::BuildDefaultURLRequestContext.
} else if (iter.first == "feature_list") {
if (!iter.second.is_dict()) {
LOG(ERROR) << "\"" << iter.first << "\" config params \"" << iter.second
<< "\" is not a dictionary value";
effective_experimental_options.erase(iter.first);
continue;
}
// Already handled in Cronet_EngineImpl::StartWithParams.
// Only checks and reports errors here.
} else {
LOG(WARNING) << "Unrecognized Cronet experimental option \"" << iter.first
<< "\" with params \"" << iter.second;

View File

@ -216,6 +216,12 @@ struct URLRequestContextConfig {
// not specify for other targets.
absl::optional<double> network_thread_priority);
// Parses experimental options from their JSON format to the format used
// internally.
// Returns an empty optional if the operation was unsuccessful.
static absl::optional<base::Value::DictStorage> ParseExperimentalOptions(
std::string unparsed_experimental_options);
private:
URLRequestContextConfig(
// Enable QUIC.
@ -253,12 +259,6 @@ struct URLRequestContextConfig {
// not specify for other targets.
absl::optional<double> network_thread_priority);
// Parses experimental options from their JSON format to the format used
// internally.
// Returns an empty optional if the operation was unsuccessful.
static absl::optional<base::Value::DictStorage> ParseExperimentalOptions(
std::string unparsed_experimental_options);
// Makes appropriate changes to settings in |this|.
void SetContextConfigExperimentalOptions();

View File

@ -21,6 +21,7 @@
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/base/request_priority.h"
#include "net/base/schemeful_site.h"
#include "net/http/bidirectional_stream.h"
#include "net/http/bidirectional_stream_request_info.h"
#include "net/http/http_network_session.h"
@ -96,8 +97,20 @@ int BidirectionalStream::Start(const char* url,
request_info->priority = static_cast<net::RequestPriority>(priority);
// Http method is a token, just as header name.
request_info->method = method;
if (!net::HttpUtil::IsValidHeaderName(request_info->method))
if (!net::HttpUtil::IsValidHeaderName(request_info->method)) {
LOG(ERROR) << "Invalid method " << request_info->method;
return -1;
}
std::string network_isolation_key_header;
if (headers.GetHeader("-network-isolation-key",
&network_isolation_key_header)) {
net::SchemefulSite site(GURL{network_isolation_key_header});
if (site.opaque()) {
LOG(ERROR) << "Invalid -network-isolation-key "
<< network_isolation_key_header;
return -1;
}
}
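// The header is only validated here; net::BidirectionalStream::StartRequest
// strips it and converts it into the stream's NetworkIsolationKey.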
request_info->extra_headers.CopyFrom(headers);
request_info->end_stream_on_headers = end_of_stream;
write_end_of_stream_ = end_of_stream;

5
src/config.json Normal file
View File

@ -0,0 +1,5 @@
{
"listen": "socks://127.0.0.1:1080",
"proxy": "https://user:pass@domain.example",
"log": ""
}

18
src/get-android-sys.sh Executable file
View File

@ -0,0 +1,18 @@
#!/bin/sh
set -ex
. ./get-sysroot.sh
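# Downloads the Android system image selected by get-sysroot.sh
# (WITH_ANDROID_IMG), mounts it, and copies just the linker, hosts file and
# system libraries into a minimal rootfs used for running tests.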
if [ "$WITH_ANDROID_IMG" -a ! -d out/sysroot-build/android/"$WITH_ANDROID_IMG"/system ]; then
curl -O https://dl.google.com/android/repository/sys-img/android/$WITH_ANDROID_IMG.zip
mkdir -p $WITH_ANDROID_IMG/mount
unzip $WITH_ANDROID_IMG.zip '*/system.img' -d $WITH_ANDROID_IMG
sudo mount $WITH_ANDROID_IMG/*/system.img $WITH_ANDROID_IMG/mount
rootfs=out/sysroot-build/android/$WITH_ANDROID_IMG
mkdir -p $rootfs/system/bin $rootfs/system/etc
cp $WITH_ANDROID_IMG/mount/bin/linker* $rootfs/system/bin
cp $WITH_ANDROID_IMG/mount/etc/hosts $rootfs/system/etc
cp -r $WITH_ANDROID_IMG/mount/lib* $rootfs/system
sudo umount $WITH_ANDROID_IMG/mount
rm -rf $WITH_ANDROID_IMG $WITH_ANDROID_IMG.zip
fi

59
src/get-clang.sh Executable file
View File

@ -0,0 +1,59 @@
#!/bin/sh
set -ex
. ./get-sysroot.sh
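# Fetches the pinned toolchain pieces selected by get-sysroot.sh: the Chromium
# clang package, PGO/AFDO profiles, gn, sccache and a trimmed Android NDK, plus
# the Debian or OpenWrt sysroot when cross compiling.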
if [ "$BUILD_SYSROOT" -a ! -d ./"$WITH_SYSROOT/lib" ]; then
./build/linux/sysroot_scripts/sysroot-creator-sid-naive.sh "$BUILD_SYSROOT"
fi
if [ "$OPENWRT_FLAGS" ]; then
./get-openwrt.sh
fi
if [ ! -d third_party/llvm-build/Release+Asserts/bin ]; then
mkdir -p third_party/llvm-build/Release+Asserts
clang_path="clang-$CLANG_REVISION.tgz"
clang_url="https://commondatastorage.googleapis.com/chromium-browser-clang/$WITH_CLANG/$clang_path"
curl "$clang_url" | tar xzf - -C third_party/llvm-build/Release+Asserts
fi
if [ "$USE_AFDO" -a ! -f chrome/android/profiles/afdo.prof ]; then
afdo_path=$(cat chrome/android/profiles/newest.txt)
afdo_url="https://storage.googleapis.com/chromeos-prebuilt/afdo-job/llvm/$afdo_path"
curl "$afdo_url" | bzip2 -cd >chrome/android/profiles/afdo.prof
fi
if [ "$WITH_PGO" -a ! -f chrome/build/pgo_profiles/"$PGO_PATH" ]; then
mkdir -p chrome/build/pgo_profiles
cd chrome/build/pgo_profiles
curl --limit-rate 10M -LO "https://storage.googleapis.com/chromium-optimization-profiles/pgo_profiles/$PGO_PATH"
cd ../../..
fi
if [ "$USE_SCCACHE" -a ! -f ~/.cargo/bin/sccache.exe ]; then
sccache_url="https://github.com/mozilla/sccache/releases/download/0.2.12/sccache-0.2.12-x86_64-pc-windows-msvc.tar.gz"
mkdir -p ~/.cargo/bin
curl -L "$sccache_url" | tar xzf - --strip=1 -C ~/.cargo/bin
fi
if [ ! -f gn/out/gn ]; then
gn_version=$(grep "'gn_version':" DEPS | cut -d"'" -f4)
mkdir -p gn/out
curl -L "https://chrome-infra-packages.appspot.com/dl/gn/gn/$WITH_GN-amd64/+/$gn_version" -o gn.zip
unzip gn.zip -d gn/out
rm gn.zip
fi
if [ "$USE_ANDROID_NDK" -a ! -d third_party/android_ndk ]; then
android_ndk_version=$(grep android_ndk.git DEPS | cut -d"'" -f10)
git clone --depth=1 --filter=blob:none --no-checkout https://chromium.googlesource.com/android_ndk.git third_party/android_ndk
cd third_party/android_ndk
git sparse-checkout init --cone
git sparse-checkout set sources/android/cpufeatures toolchains/llvm/prebuilt
git -c advice.detachedHead=false checkout "$android_ndk_version"
rm -rf .git
find toolchains -type f -regextype egrep \! -regex \
'.*(lib(atomic|gcc|gcc_real|compiler_rt-extras|android_support|unwind).a|crt.*o|lib(android|c|dl|log|m).so|usr/local.*|usr/include.*)' -delete
cd ../..
fi

54
src/get-openwrt.sh Executable file
View File

@ -0,0 +1,54 @@
#!/bin/sh
set -ex
eval "$OPENWRT_FLAGS"
sysroot=$PWD/out/sysroot-build/openwrt/$release/$arch
if [ -d $sysroot/lib ]; then
exit 0
fi
mkdir -p $sysroot
case "$arch" in
arm_*) abi=musl_eabi;;
*) abi=musl;;
esac
if [ "$subtarget" ]; then
SDK_PATH=openwrt-sdk-$release-$target-${subtarget}_gcc-${gcc_ver}_${abi}.Linux-x86_64
else
subtarget='generic'
SDK_PATH=openwrt-sdk-$release-${target}_gcc-${gcc_ver}_${abi}.Linux-x86_64
fi
SDK_URL=https://downloads.openwrt.org/releases/$release/targets/$target/$subtarget/$SDK_PATH.tar.xz
rm -rf $SDK_PATH
curl $SDK_URL | tar xJf -
cd $SDK_PATH
full_root=staging_dir/toolchain-*_gcc-${gcc_ver}_${abi}
cat >include.txt <<EOF
./include
./lib/*.o
./lib/gcc/*/libgcc.a
./lib/gcc/*/libgcc_eh.a
./lib/libatomic.so*
./lib/libatomic.a
./lib/libc.so
./lib/libc.a
./lib/libdl.a
./lib/ld-*
./lib/libgcc_s.*
./lib/libm.a
./lib/libpthread.a
./lib/libresolv.a
./lib/librt.a
./usr
*.ld.bin
EOF
tar cf - -C $full_root --hard-dereference . | tar xf - -C $sysroot --wildcards --wildcards-match-slash -T include.txt
rm -rf include.txt $SDK_PATH
cd $sysroot/*-openwrt-linux-musl*/bin
case "$arch" in
mips*) mv .ld.bin ld;;
*) rm .ld.bin;;
esac

85
src/get-sysroot.sh Normal file
View File

@ -0,0 +1,85 @@
ARCH=$(uname)
PYTHON=$(which python3 2>/dev/null || which python 2>/dev/null)
CLANG_REVISION=$($PYTHON tools/clang/scripts/update.py --print-revision)
eval "$EXTRA_FLAGS"
case "$ARCH" in
Linux)
if which ccache >/dev/null 2>&1; then
export CCACHE_SLOPPINESS=time_macros
export CCACHE_BASEDIR="$PWD"
export CCACHE_CPP2=yes
CCACHE=ccache
fi
WITH_CLANG=Linux_x64
WITH_PGO=linux
WITH_GN=linux
case "$target_cpu" in
x64) WITH_QEMU=x86_64;;
x86) WITH_QEMU=i386;;
arm64) WITH_QEMU=aarch64;;
arm) WITH_QEMU=arm;;
mipsel) WITH_QEMU=mipsel;;
mips64el) WITH_QEMU=mips64el;;
esac
if [ "$OPENWRT_FLAGS" ]; then
eval "$OPENWRT_FLAGS"
WITH_SYSROOT="out/sysroot-build/openwrt/$release/$arch"
elif [ "$target_os" = android ]; then
WITH_PGO=
USE_AFDO=y
USE_ANDROID_NDK=y
WITH_SYSROOT=
case "$target_cpu" in
x64) WITH_ANDROID_IMG=x86_64-24_r08;;
x86) WITH_ANDROID_IMG=x86-24_r08;;
arm64) WITH_ANDROID_IMG=arm64-v8a-24_r07;;
arm) WITH_ANDROID_IMG=armeabi-v7a-24_r07;;
esac
else
case "$target_cpu" in
x64) sysroot_path=amd64 BUILD_SYSROOT=BuildSysrootAmd64;;
x86) sysroot_path=i386 BUILD_SYSROOT=BuildSysrootI386;;
arm64) sysroot_path=arm64 BUILD_SYSROOT=BuildSysrootARM64;;
arm) sysroot_path=arm BUILD_SYSROOT=BuildSysrootARM;;
mipsel) sysroot_path=mips BUILD_SYSROOT=BuildSysrootMips;;
mips64el) sysroot_path=mips64el BUILD_SYSROOT=BuildSysrootMips64el;;
esac
if [ "$sysroot_path" ]; then
WITH_SYSROOT="out/sysroot-build/sid/sid_${sysroot_path}_staging"
fi
fi
;;
MINGW*|MSYS*)
ARCH=Windows
if [ -f "$HOME"/.cargo/bin/sccache* ]; then
export PATH="$PATH:$HOME/.cargo/bin"
CCACHE=sccache
fi
WITH_CLANG=Win
USE_SCCACHE=y
WITH_GN=windows
case "$target_cpu" in
x64) WITH_PGO=win64;;
*) WITH_PGO=win32;;
esac
;;
Darwin)
if which ccache >/dev/null 2>&1; then
export CCACHE_SLOPPINESS=time_macros
export CCACHE_BASEDIR="$PWD"
export CCACHE_CPP2=yes
CCACHE=ccache
fi
WITH_CLANG=Mac
WITH_GN=mac
case "$target_cpu" in
arm64) WITH_PGO=mac-arm;;
*) WITH_PGO=mac;;
esac
;;
esac
if [ "$WITH_PGO" ]; then
PGO_PATH=$(cat chrome/build/$WITH_PGO.pgo.txt)
fi

255
src/make-cronet-cgo-sdk.sh Executable file
View File

@ -0,0 +1,255 @@
#!/bin/sh
. ./get-sysroot.sh
case "$ARCH" in
Linux)
WITH_GOOS=linux
if [ "$target_os" = 'android' ]; then
WITH_GOOS=android
fi
case "$target_cpu" in
x64) WITH_GOARCH=amd64;;
x86) WITH_GOARCH=386;;
arm64) WITH_GOARCH=arm64;;
arm) WITH_GOARCH=arm;;
mipsel) WITH_GOARCH=mipsle;;
mips64el) WITH_GOARCH=mips64le;;
esac
shared_lib_name='libcronet.so'
static_lib_name='libcronet_static.a'
;;
Windows)
WITH_GOOS=windows
case "$target_cpu" in
x64) WITH_GOARCH=amd64;;
x86) WITH_GOARCH=386;;
arm64) WITH_GOARCH=arm64;;
esac
shared_lib_name='cronet.dll.lib'
dll_name='cronet.dll'
static_lib_name='cronet_static.lib'
;;
Darwin)
WITH_GOOS=darwin
case "$target_cpu" in
x64) WITH_GOARCH=amd64;;
arm64) WITH_GOARCH=arm64;;
esac
shared_lib_name='libcronet.dylib'
static_lib_name='libcronet_static.a'
;;
esac
set -ex
mkdir -p out/Release/cronet
getflag() {
local ninjafile=./out/Release/obj/components/cronet/$1.ninja
local flagname="$2"
grep "\<$flagname = " $ninjafile | cut -d= -f2- | sed 's/\$:/:/g;s/\\%/%/g;s/\\\$\$/\$/g' | sed "s#=\.\./#=$PWD/out/Release/../#g"
}
if [ "$target_os" = 'android' ]; then
WITH_SYSROOT='third_party/android_ndk/toolchains/llvm/prebuilt/linux-x86_64/sysroot'
fi
# Extracts this manually because the sysroot flag generated by Chromium is bundled with other, irrelevant flags.
if [ "$WITH_SYSROOT" ]; then
cgo_sysroot_flag='--sysroot=$PWD/sysroot'
link_sysroot_flag='--sysroot=${SRCDIR}/sysroot'
fi
# Mac SDK path should be detected in the CGO builder.
if [ "$ARCH" = 'Darwin' ]; then
cgo_sysroot_flag='-isysroot $PWD/sysroot'
link_sysroot_flag='-isysroot ${SRCDIR}/sysroot'
fi
cp -a out/Release/$shared_lib_name out/Release/cronet/
if [ "$ARCH" = 'Windows' ]; then
cp -a out/Release/$dll_name out/Release/cronet/
fi
cp -a out/Release/obj/components/cronet/$static_lib_name out/Release/cronet/
cp -a components/cronet/native/sample/cronet_example.go out/Release/cronet/
cp -a components/cronet/native/generated/cronet.idl_c.h out/Release/cronet/
cp -a components/cronet/native/include/cronet_c.h out/Release/cronet/
cp -a components/cronet/native/include/cronet_export.h out/Release/cronet/
cp -a components/grpc_support/include/bidirectional_stream_c.h out/Release/cronet/
if [ "$WITH_SYSROOT" ]; then
cp -a "$PWD/$WITH_SYSROOT" out/Release/cronet/sysroot
fi
if [ "$target_os" = 'android' ]; then
# Included by base/BUILD.gn
cp -a base/android/library_loader/anchor_functions.lds out/Release/cronet
fi
cp -a components/cronet/native/go-build.sh out/Release/cronet/
# CGO's intermediate C files are very small. They need no optimization and only very basic flags.
getcgoflags() {
# -mllvm: avoid confusion with -m*; we don't use this flag in cgo flags anyway.
# ' -march= ': artifact during OpenWrt build
# -arch xyz: Mac specific
# -fmsc-version=: Windows specific
# -Wl,--dynamic-linker=: OpenWrt specific
# --unwindlib=: Android specific
sed 's/-mllvm[ :][^ ]*//g;s/ -march= / /g' | grep -Eo ' (-fuse-ld=|--target=|-m|-arch |-fmsc-version=|-Wl,--dynamic-linker=|--unwindlib=)[^ ]*' | tr -d '\n'
}
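# The extracted result is a short list such as "--target=<triple> -m64
# -fuse-ld=lld", which is all cgo's generated C shims need.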
cgo_cflags="$(getflag cronet_example_external cflags | getcgoflags) $cgo_sysroot_flag"
cgo_ldflags="$(getflag cronet_example_external ldflags | getcgoflags) $cgo_sysroot_flag"
# sysroot: It helps cronet_example_external compile, but CGO uses manually constructed sysroot option.
# NATVIS: Windows specific; cannot be turned off cleanly with GN flags, so removes it manually here.
shared_ldflags="$(getflag cronet_example_external ldflags | sed 's/-isysroot [^ ]*//g;s#/NATVIS:[^ ]*##g') $link_sysroot_flag"
static_ldflags="$(getflag cronet_example_external_static ldflags | sed 's/-isysroot [^ ]*//g;s#/NATVIS:[^ ]*##g') $link_sysroot_flag"
shared_solibs="$(getflag cronet_example_external solibs)"
static_solibs="$(getflag cronet_example_external_static solibs)"
shared_libs="$(getflag cronet_example_external libs)"
static_libs="$(getflag cronet_example_external_static libs)"
shared_frameworks="$(getflag cronet_example_external frameworks)"
static_frameworks="$(getflag cronet_example_external_static frameworks)"
if [ "$ARCH" = 'Linux' ]; then
static_libs="./$static_lib_name $static_libs"
# Regular Linux seems to require this.
if [ ! "$target_os" ]; then
static_libs="$static_libs -lm"
fi
if [ "$target_os" = 'android' ]; then
static_libs="$(echo $static_libs | sed 's#[^ ]*/anchor_functions.lds#./anchor_functions.lds#')"
fi
elif [ "$ARCH" = 'Windows' ]; then
# -Wno-dll-attribute-on-redeclaration: https://github.com/golang/go/issues/46502
cgo_cflags="$cgo_cflags -Wno-dll-attribute-on-redeclaration"
# Chromium uses clang-cl.exe, but CGO officially only supports GCC/MinGW on Windows. See https://github.com/golang/go/issues/17014.
# 1. CGO hardcodes GCC options incompatible with clang-cl, so an extra clang.exe is required (Chromium only provides clang-cl.exe).
# 2. We need CGO to link LLVM bitcode from Chromium, so ld cannot work and lld is required.
# 3. CGO passes GCC options incompatible with lld, so an extra lld wrapper is required to remove those options.
# 4. I didn't figure out a way to make the whole pipeline use lld-link-wrapper cleanly:
# * `-fuse-ld=lld --ld-path=lld-link-wrapper` reports `--ld-path` is an unknown argument.
# * `-fuse-ld=.../lld-link-wrapper.exe` creates garbled linker path.
# So uses a hack to rename lld-link.exe to lld-link-old.exe which is called from the wrapper "lld-link.exe".
# 5. lld-13 does not work with bitcode produced by Chromium's lld-15.
# So copies clang-13 from environment to Chromium's LLVM bin directory, and uses clang-13 together with lld-15.
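# The wrapper itself simply drops the GNU-ld-only flags lld-link rejects
# (--tsaware, --nxcompat, the --*-os-version=/--*-subsystem-version= options,
# --dynamicbase, --high-entropy-va, -T <script>, --start-group/--end-group and
# the mingw*.lib stubs) and forwards everything else to the renamed lld-link-old.exe.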
cgo_ldflags="-fuse-ld=lld $cgo_ldflags"
# Helps clang find architecture-specific libpath.
# Chromium uses setup_toolchain.py to create -libpath flags. It's too complicated.
# This option is already in cflags.
if [ "$target_cpu" = 'arm64' ]; then
cgo_ldflags="$cgo_ldflags --target=arm64-windows"
fi
# Hardcodes sys_lib_flags values from build/toolchain/win/BUILD.gn instead of extracting it from GN artifacts.
case "$target_cpu" in
x64) cgo_ldflags="$cgo_ldflags -Wl,/MACHINE:X64";;
x86) cgo_ldflags="$cgo_ldflags -Wl,/MACHINE:X86";;
arm64) cgo_ldflags="$cgo_ldflags -Wl,/MACHINE:ARM64";;
esac
# Chromium enables /SAFESEH for x86 with clang-cl, but CGO compiles gcc_386.S with GCC which does not support /SAFESEH.
# So has to remove /SAFESEH for x86.
if [ "$target_cpu" = 'x86' ]; then
cgo_ldflags="$cgo_ldflags -Wl,/SAFESEH:NO"
fi
# Chromium uses lld-link separately, but CGO calls lld-link through clang, so linker options must be wrapped in clang options.
escapelinkerflags() {
for i in "$@"; do
if echo "$i" | grep -q ','; then
echo -n " -Xlinker $i"
else
echo -n " -Wl,$i"
fi
done
}
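# For example "/MACHINE:X64" becomes "-Wl,/MACHINE:X64", while an option that
# itself contains a comma, say "/SUBSYSTEM:CONSOLE,5.02", must go through
# "-Xlinker ..." so clang does not split it at the comma.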
shared_ldflags="$cgo_ldflags $(escapelinkerflags $shared_ldflags)"
static_ldflags="$cgo_ldflags $(escapelinkerflags $static_ldflags)"
# xyz.lib must be wrapped in clang options
shared_libs="./$shared_lib_name $(echo $shared_libs | sed 's/\([a-z0-9_]*\)\.lib/-l\1/g' )"
static_libs="./$static_lib_name $(echo $static_libs | sed 's/\([a-z0-9_]*\)\.lib/-l\1/g' )"
elif [ "$ARCH" = 'Darwin' ]; then
static_libs="./$static_lib_name $static_libs"
fi
if [ "$ARCH" = 'Linux' ]; then
# Follows the order of tool("link") from build/toolchain/gcc_toolchain.gni
shared_ldflags="$shared_ldflags $shared_solibs $shared_libs"
static_ldflags="$static_ldflags $static_solibs $static_libs"
elif [ "$ARCH" = 'Windows' ]; then
# Follows the order of tool("link") from build/toolchain/win/toolchain.gni
shared_ldflags="$sys_lib_flags $shared_libs $shared_solibs $shared_ldflags"
static_ldflags="$sys_lib_flags $static_libs $static_solibs $static_ldflags"
elif [ "$ARCH" = 'Darwin' ]; then
# Follows the order of tool("link") from build/toolchain/apple/toolchain.gni
shared_ldflags="$shared_ldflags $shared_frameworks $shared_solibs $shared_libs"
static_ldflags="$static_ldflags $static_frameworks $static_solibs $static_libs"
fi
# CGO adds -marm, which conflicts with -mthumb used by various OpenWrt targets.
if [ "$target_cpu" = 'arm' ]; then
cgo_cflags=$(echo "$cgo_cflags" | sed 's/ -mthumb / /g')
fi
buildmode_flag='-buildmode=pie'
if [ "$target_cpu" = 'mipsel' -o "$target_cpu" = 'mips64el' ]; then
# CGO does not support PIE for linux/mipsle and linux/mips64le.
buildmode_flag=
elif [ "$target_cpu" = 'arm64' -a "$ARCH" = 'Windows' ]; then
# CGO does not support PIE for windows/arm64.
buildmode_flag=
elif [ "$target_cpu" = 'x86' -a "$target_os" != 'android' ]; then
# Segfaults if built with PIE in regular Linux. TODO: Find out why.
buildmode_flag=
fi
# Requires explicit -nopie, otherwise clang sometimes adds -pie when invoking lld.
if [ ! "$buildmode_flag" ]; then
shared_ldflags="$(echo "$shared_ldflags" | sed 's/ -pie / /g') -nopie"
static_ldflags="$(echo "$static_ldflags" | sed 's/ -pie / /g') -nopie"
fi
# Avoids section type mismatch for .debug_info etc on MIPS.
# This is probably caused by different expectation between LLVM and CGO's GCC.
if [ "$target_cpu" = 'mipsel' -o "$target_cpu" = 'mips64el' ]; then
shared_ldflags="$shared_ldflags -Wl,--strip-debug"
static_ldflags="$static_ldflags -Wl,--strip-debug"
fi
# Allows running cronet_example test case without explicit LD_LIBRARY_PATH.
if [ "$ARCH" = 'Linux' ]; then
shared_ldflags="$shared_ldflags -Wl,-rpath,\$ORIGIN"
fi
cat >out/Release/cronet/go_env.sh <<EOF
ARCH=$ARCH
target_cpu=$target_cpu
CLANG_REVISION=$CLANG_REVISION
WITH_CLANG=$WITH_CLANG
WITH_QEMU=$WITH_QEMU
WITH_ANDROID_IMG=$WITH_ANDROID_IMG
buildmode_flag=$buildmode_flag
[ "$WITH_GOOS" -a "$WITH_GOARCH" ] && export GOOS="$WITH_GOOS"
[ "$WITH_GOARCH" -a "$WITH_GOARCH" ] && export GOARCH="$WITH_GOARCH"
[ "$mips_float_abi" = "soft" ] && export GOMIPS=softfloat
export CGO_CFLAGS="$cgo_cflags"
export CGO_LDFLAGS="$cgo_ldflags"
EOF
cat >out/Release/cronet/link_shared.go <<EOF
package main
// #cgo LDFLAGS: $shared_ldflags
import "C"
EOF
cat >out/Release/cronet/link_static.go <<EOF
package main
// #cgo LDFLAGS: $static_ldflags
import "C"
EOF
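# The resulting out/Release/cronet directory is self-contained: after sourcing
# go_env.sh for the toolchain environment, cronet_example.go is built together
# with link_shared.go (dynamic libcronet) or link_static.go (static
# libcronet_static), which is what go-build.sh does.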

View File

@ -1728,3 +1728,29 @@ static_library("preload_decoder") {
]
deps = [ "//base" ]
}
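# Builds the standalone naive proxy binary from the sources under
# net/tools/naive on top of //net.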
executable("naive") {
sources = [
"tools/naive/naive_connection.cc",
"tools/naive/naive_connection.h",
"tools/naive/naive_proxy.cc",
"tools/naive/naive_proxy.h",
"tools/naive/naive_proxy_bin.cc",
"tools/naive/naive_proxy_delegate.h",
"tools/naive/naive_proxy_delegate.cc",
"tools/naive/http_proxy_socket.cc",
"tools/naive/http_proxy_socket.h",
"tools/naive/redirect_resolver.h",
"tools/naive/redirect_resolver.cc",
"tools/naive/socks5_server_socket.cc",
"tools/naive/socks5_server_socket.h",
]
deps = [
":net",
"//base",
"//build/win:default_exe_manifest",
"//components/version_info:version_info",
"//url",
]
}

View File

@ -207,6 +207,14 @@ void BidirectionalStream::StartRequest(const SSLConfig& ssl_config) {
HttpRequestInfo http_request_info;
http_request_info.url = request_info_->url;
http_request_info.method = request_info_->method;
std::string network_isolation_key_header;
if (request_info_->extra_headers.GetHeader("-network-isolation-key",
&network_isolation_key_header)) {
request_info_->extra_headers.RemoveHeader("-network-isolation-key");
net::SchemefulSite site(GURL{network_isolation_key_header});
CHECK(!site.opaque());
http_request_info.network_isolation_key = NetworkIsolationKey(site, site);
}
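// Past this point the synthetic header is gone from extra_headers, so it is
// never sent on the wire; only the derived NetworkIsolationKey (same site for
// frame and top frame) influences how the connection is keyed.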
http_request_info.extra_headers = request_info_->extra_headers;
http_request_info.socket_tag = request_info_->socket_tag;
stream_request_ =

View File

@ -488,6 +488,11 @@ EVENT_TYPE(SOCKS_HOSTNAME_TOO_BIG)
EVENT_TYPE(SOCKS_UNEXPECTEDLY_CLOSED_DURING_GREETING)
EVENT_TYPE(SOCKS_UNEXPECTEDLY_CLOSED_DURING_HANDSHAKE)
EVENT_TYPE(SOCKS_NO_REQUESTED_AUTH)
EVENT_TYPE(SOCKS_NO_ACCEPTABLE_AUTH)
EVENT_TYPE(SOCKS_ZERO_LENGTH_DOMAIN)
EVENT_TYPE(SOCKS_UNEXPECTED_COMMAND)
// This event indicates that a bad version number was received in the
// proxy server's response. The extra parameters show its value:
// {

View File

@ -0,0 +1,360 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Copyright 2018 klzgrad <kizdiv@gmail.com>. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "net/tools/naive/http_proxy_socket.h"
#include <cstring>
#include <utility>
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/logging.h"
#include "base/rand_util.h"
#include "base/sys_byteorder.h"
#include "net/base/ip_address.h"
#include "net/base/net_errors.h"
#include "net/http/http_request_headers.h"
#include "net/log/net_log.h"
#include "net/third_party/quiche/src/spdy/core/hpack/hpack_constants.h"
#include "net/tools/naive/naive_proxy_delegate.h"
namespace net {
namespace {
constexpr int kBufferSize = 64 * 1024;
constexpr size_t kMaxHeaderSize = 64 * 1024;
constexpr char kResponseHeader[] = "HTTP/1.1 200 OK\r\nPadding: ";
constexpr int kResponseHeaderSize = sizeof(kResponseHeader) - 1;
// A plain 200 is 10 bytes. Expected 48 bytes. "Padding" uses up 7 bytes.
constexpr int kMinPaddingSize = 30;
constexpr int kMaxPaddingSize = kMinPaddingSize + 32;
} // namespace
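// The handshake handled below is a plain HTTP/1.1 CONNECT exchange, e.g.
//   client:  CONNECT example.org:443 HTTP/1.1\r\nPadding: [...]\r\n\r\n
//   server:  HTTP/1.1 200 OK\r\nPadding: <filler>\r\n\r\n
// A "Padding" request header marks the client as padding-capable; the response
// always carries kMinPaddingSize..kMaxPaddingSize bytes of filler.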
HttpProxySocket::HttpProxySocket(
std::unique_ptr<StreamSocket> transport_socket,
ClientPaddingDetectorDelegate* padding_detector_delegate,
const NetworkTrafficAnnotationTag& traffic_annotation)
: io_callback_(base::BindRepeating(&HttpProxySocket::OnIOComplete,
base::Unretained(this))),
transport_(std::move(transport_socket)),
padding_detector_delegate_(padding_detector_delegate),
next_state_(STATE_NONE),
completed_handshake_(false),
was_ever_used_(false),
header_write_size_(-1),
net_log_(transport_->NetLog()),
traffic_annotation_(traffic_annotation) {}
HttpProxySocket::~HttpProxySocket() {
Disconnect();
}
const HostPortPair& HttpProxySocket::request_endpoint() const {
return request_endpoint_;
}
int HttpProxySocket::Connect(CompletionOnceCallback callback) {
DCHECK(transport_);
DCHECK_EQ(STATE_NONE, next_state_);
DCHECK(!user_callback_);
// If already connected, then just return OK.
if (completed_handshake_)
return OK;
next_state_ = STATE_HEADER_READ;
buffer_.clear();
int rv = DoLoop(OK);
if (rv == ERR_IO_PENDING) {
user_callback_ = std::move(callback);
}
return rv;
}
void HttpProxySocket::Disconnect() {
completed_handshake_ = false;
transport_->Disconnect();
// Reset other states to make sure they aren't mistakenly used later.
// These are the states initialized by Connect().
next_state_ = STATE_NONE;
user_callback_.Reset();
}
bool HttpProxySocket::IsConnected() const {
return completed_handshake_ && transport_->IsConnected();
}
bool HttpProxySocket::IsConnectedAndIdle() const {
return completed_handshake_ && transport_->IsConnectedAndIdle();
}
const NetLogWithSource& HttpProxySocket::NetLog() const {
return net_log_;
}
bool HttpProxySocket::WasEverUsed() const {
return was_ever_used_;
}
bool HttpProxySocket::WasAlpnNegotiated() const {
if (transport_) {
return transport_->WasAlpnNegotiated();
}
NOTREACHED();
return false;
}
NextProto HttpProxySocket::GetNegotiatedProtocol() const {
if (transport_) {
return transport_->GetNegotiatedProtocol();
}
NOTREACHED();
return kProtoUnknown;
}
bool HttpProxySocket::GetSSLInfo(SSLInfo* ssl_info) {
if (transport_) {
return transport_->GetSSLInfo(ssl_info);
}
NOTREACHED();
return false;
}
void HttpProxySocket::GetConnectionAttempts(ConnectionAttempts* out) const {
out->clear();
}
int64_t HttpProxySocket::GetTotalReceivedBytes() const {
return transport_->GetTotalReceivedBytes();
}
void HttpProxySocket::ApplySocketTag(const SocketTag& tag) {
return transport_->ApplySocketTag(tag);
}
// Read is called by the transport layer above to read. This can only be done
// if the HTTP header is complete.
int HttpProxySocket::Read(IOBuffer* buf,
int buf_len,
CompletionOnceCallback callback) {
DCHECK(completed_handshake_);
DCHECK_EQ(STATE_NONE, next_state_);
DCHECK(!user_callback_);
DCHECK(callback);
if (!buffer_.empty()) {
was_ever_used_ = true;
int data_len = buffer_.size();
if (data_len <= buf_len) {
std::memcpy(buf->data(), buffer_.data(), data_len);
buffer_.clear();
return data_len;
} else {
std::memcpy(buf->data(), buffer_.data(), buf_len);
buffer_ = buffer_.substr(buf_len);
return buf_len;
}
}
int rv = transport_->Read(
buf, buf_len,
base::BindOnce(&HttpProxySocket::OnReadWriteComplete,
base::Unretained(this), std::move(callback)));
if (rv > 0)
was_ever_used_ = true;
return rv;
}
// Write is called by the transport layer. This can only be done if the
// HTTP CONNECT handshake is complete.
int HttpProxySocket::Write(
IOBuffer* buf,
int buf_len,
CompletionOnceCallback callback,
const NetworkTrafficAnnotationTag& traffic_annotation) {
DCHECK(completed_handshake_);
DCHECK_EQ(STATE_NONE, next_state_);
DCHECK(!user_callback_);
DCHECK(callback);
int rv = transport_->Write(
buf, buf_len,
base::BindOnce(&HttpProxySocket::OnReadWriteComplete,
base::Unretained(this), std::move(callback)),
traffic_annotation);
if (rv > 0)
was_ever_used_ = true;
return rv;
}
int HttpProxySocket::SetReceiveBufferSize(int32_t size) {
return transport_->SetReceiveBufferSize(size);
}
int HttpProxySocket::SetSendBufferSize(int32_t size) {
return transport_->SetSendBufferSize(size);
}
void HttpProxySocket::DoCallback(int result) {
DCHECK_NE(ERR_IO_PENDING, result);
DCHECK(user_callback_);
// Since Run() may result in Read being called,
// clear user_callback_ up front.
std::move(user_callback_).Run(result);
}
void HttpProxySocket::OnIOComplete(int result) {
DCHECK_NE(STATE_NONE, next_state_);
int rv = DoLoop(result);
if (rv != ERR_IO_PENDING) {
DoCallback(rv);
}
}
void HttpProxySocket::OnReadWriteComplete(CompletionOnceCallback callback,
int result) {
DCHECK_NE(ERR_IO_PENDING, result);
DCHECK(callback);
if (result > 0)
was_ever_used_ = true;
std::move(callback).Run(result);
}
int HttpProxySocket::DoLoop(int last_io_result) {
DCHECK_NE(next_state_, STATE_NONE);
int rv = last_io_result;
do {
State state = next_state_;
next_state_ = STATE_NONE;
switch (state) {
case STATE_HEADER_READ:
DCHECK_EQ(OK, rv);
rv = DoHeaderRead();
break;
case STATE_HEADER_READ_COMPLETE:
rv = DoHeaderReadComplete(rv);
break;
case STATE_HEADER_WRITE:
DCHECK_EQ(OK, rv);
rv = DoHeaderWrite();
break;
case STATE_HEADER_WRITE_COMPLETE:
rv = DoHeaderWriteComplete(rv);
break;
default:
NOTREACHED() << "bad state";
rv = ERR_UNEXPECTED;
break;
}
} while (rv != ERR_IO_PENDING && next_state_ != STATE_NONE);
return rv;
}
int HttpProxySocket::DoHeaderRead() {
next_state_ = STATE_HEADER_READ_COMPLETE;
handshake_buf_ = base::MakeRefCounted<IOBuffer>(kBufferSize);
return transport_->Read(handshake_buf_.get(), kBufferSize, io_callback_);
}
int HttpProxySocket::DoHeaderReadComplete(int result) {
if (result < 0)
return result;
if (result == 0) {
return ERR_CONNECTION_CLOSED;
}
buffer_.append(handshake_buf_->data(), result);
if (buffer_.size() > kMaxHeaderSize) {
return ERR_MSG_TOO_BIG;
}
auto header_end = buffer_.find("\r\n\r\n");
if (header_end == std::string::npos) {
next_state_ = STATE_HEADER_READ;
return OK;
}
// HttpProxyClientSocket uses CONNECT for all endpoints.
auto first_line_end = buffer_.find("\r\n");
auto first_space = buffer_.find(' ');
if (first_space == std::string::npos || first_space + 1 >= first_line_end) {
return ERR_INVALID_ARGUMENT;
}
if (buffer_.compare(0, first_space, "CONNECT") != 0) {
return ERR_INVALID_ARGUMENT;
}
auto second_space = buffer_.find(' ', first_space + 1);
if (second_space == std::string::npos || second_space >= first_line_end) {
return ERR_INVALID_ARGUMENT;
}
request_endpoint_ = HostPortPair::FromString(
buffer_.substr(first_space + 1, second_space - (first_space + 1)));
auto second_line = first_line_end + 2;
HttpRequestHeaders headers;
std::string headers_str;
if (second_line < header_end) {
headers_str = buffer_.substr(second_line, header_end - second_line);
headers.AddHeadersFromString(headers_str);
}
if (headers.HasHeader("padding")) {
padding_detector_delegate_->SetClientPaddingSupport(
PaddingSupport::kCapable);
} else {
padding_detector_delegate_->SetClientPaddingSupport(
PaddingSupport::kIncapable);
}
buffer_ = buffer_.substr(header_end + 4);
next_state_ = STATE_HEADER_WRITE;
return OK;
}
int HttpProxySocket::DoHeaderWrite() {
next_state_ = STATE_HEADER_WRITE_COMPLETE;
// Adds padding.
int padding_size = base::RandInt(kMinPaddingSize, kMaxPaddingSize);
header_write_size_ = kResponseHeaderSize + padding_size + 4;
handshake_buf_ = base::MakeRefCounted<IOBuffer>(header_write_size_);
char* p = handshake_buf_->data();
std::memcpy(p, kResponseHeader, kResponseHeaderSize);
FillNonindexHeaderValue(base::RandUint64(), p + kResponseHeaderSize,
padding_size);
std::memcpy(p + kResponseHeaderSize + padding_size, "\r\n\r\n", 4);
return transport_->Write(handshake_buf_.get(), header_write_size_,
io_callback_, traffic_annotation_);
}
int HttpProxySocket::DoHeaderWriteComplete(int result) {
if (result < 0)
return result;
if (result != header_write_size_) {
return ERR_FAILED;
}
completed_handshake_ = true;
next_state_ = STATE_NONE;
return OK;
}
int HttpProxySocket::GetPeerAddress(IPEndPoint* address) const {
return transport_->GetPeerAddress(address);
}
int HttpProxySocket::GetLocalAddress(IPEndPoint* address) const {
return transport_->GetLocalAddress(address);
}
} // namespace net

View File

@ -0,0 +1,125 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Copyright 2018 klzgrad <kizdiv@gmail.com>. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef NET_TOOLS_NAIVE_HTTP_PROXY_SOCKET_H_
#define NET_TOOLS_NAIVE_HTTP_PROXY_SOCKET_H_
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include "base/memory/scoped_refptr.h"
#include "net/base/completion_once_callback.h"
#include "net/base/completion_repeating_callback.h"
#include "net/base/host_port_pair.h"
#include "net/base/io_buffer.h"
#include "net/base/ip_endpoint.h"
#include "net/log/net_log_with_source.h"
#include "net/socket/connection_attempts.h"
#include "net/socket/next_proto.h"
#include "net/socket/stream_socket.h"
#include "net/ssl/ssl_info.h"
namespace net {
struct NetworkTrafficAnnotationTag;
class ClientPaddingDetectorDelegate;
// This StreamSocket is used to setup a HTTP CONNECT tunnel.
class HttpProxySocket : public StreamSocket {
public:
HttpProxySocket(std::unique_ptr<StreamSocket> transport_socket,
ClientPaddingDetectorDelegate* padding_detector_delegate,
const NetworkTrafficAnnotationTag& traffic_annotation);
HttpProxySocket(const HttpProxySocket&) = delete;
HttpProxySocket& operator=(const HttpProxySocket&) = delete;
// On destruction Disconnect() is called.
~HttpProxySocket() override;
const HostPortPair& request_endpoint() const;
// StreamSocket implementation.
int Connect(CompletionOnceCallback callback) override;
void Disconnect() override;
bool IsConnected() const override;
bool IsConnectedAndIdle() const override;
const NetLogWithSource& NetLog() const override;
bool WasEverUsed() const override;
bool WasAlpnNegotiated() const override;
NextProto GetNegotiatedProtocol() const override;
bool GetSSLInfo(SSLInfo* ssl_info) override;
void GetConnectionAttempts(ConnectionAttempts* out) const override;
void ClearConnectionAttempts() override {}
void AddConnectionAttempts(const ConnectionAttempts& attempts) override {}
int64_t GetTotalReceivedBytes() const override;
void ApplySocketTag(const SocketTag& tag) override;
// Socket implementation.
int Read(IOBuffer* buf,
int buf_len,
CompletionOnceCallback callback) override;
int Write(IOBuffer* buf,
int buf_len,
CompletionOnceCallback callback,
const NetworkTrafficAnnotationTag& traffic_annotation) override;
int SetReceiveBufferSize(int32_t size) override;
int SetSendBufferSize(int32_t size) override;
int GetPeerAddress(IPEndPoint* address) const override;
int GetLocalAddress(IPEndPoint* address) const override;
private:
enum State {
STATE_HEADER_READ,
STATE_HEADER_READ_COMPLETE,
STATE_HEADER_WRITE,
STATE_HEADER_WRITE_COMPLETE,
STATE_NONE,
};
void DoCallback(int result);
void OnIOComplete(int result);
void OnReadWriteComplete(CompletionOnceCallback callback, int result);
int DoLoop(int last_io_result);
int DoHeaderWrite();
int DoHeaderWriteComplete(int result);
int DoHeaderRead();
int DoHeaderReadComplete(int result);
CompletionRepeatingCallback io_callback_;
// Stores the underlying socket.
std::unique_ptr<StreamSocket> transport_;
ClientPaddingDetectorDelegate* padding_detector_delegate_;
State next_state_;
// Stores the callback to the layer above, called on completing Connect().
CompletionOnceCallback user_callback_;
// This IOBuffer is used by the class to read and write
// HTTP CONNECT handshake data. The length contains the expected size to
// read or write.
scoped_refptr<IOBuffer> handshake_buf_;
std::string buffer_;
bool completed_handshake_;
bool was_ever_used_;
int header_write_size_;
HostPortPair request_endpoint_;
NetLogWithSource net_log_;
// Traffic annotation for socket control.
const NetworkTrafficAnnotationTag& traffic_annotation_;
};
} // namespace net
#endif // NET_TOOLS_NAIVE_HTTP_PROXY_SOCKET_H_

View File

@ -0,0 +1,551 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Copyright 2018 klzgrad <kizdiv@gmail.com>. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "net/tools/naive/naive_connection.h"
#include <cstring>
#include <utility>
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/logging.h"
#include "base/rand_util.h"
#include "base/strings/strcat.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "net/base/io_buffer.h"
#include "net/base/load_flags.h"
#include "net/base/net_errors.h"
#include "net/base/privacy_mode.h"
#include "net/proxy_resolution/proxy_info.h"
#include "net/socket/client_socket_handle.h"
#include "net/socket/client_socket_pool_manager.h"
#include "net/socket/stream_socket.h"
#include "net/spdy/spdy_session.h"
#include "net/tools/naive/http_proxy_socket.h"
#include "net/tools/naive/redirect_resolver.h"
#include "net/tools/naive/socks5_server_socket.h"
#if defined(OS_LINUX)
#include <linux/netfilter_ipv4.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include "net/base/ip_endpoint.h"
#include "net/base/sockaddr_storage.h"
#include "net/socket/tcp_client_socket.h"
#endif
namespace net {
namespace {
constexpr int kBufferSize = 64 * 1024;
constexpr int kFirstPaddings = 8;
constexpr int kPaddingHeaderSize = 3;
constexpr int kMaxPaddingSize = 255;
} // namespace
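// In the padded direction the first kFirstPaddings chunks are framed as
//   [payload_len_hi][payload_len_lo][padding_len][payload...][zero padding...]
// with padding_len drawn from [0, kMaxPaddingSize]. For example, a 5-byte
// payload with 3 bytes of padding goes on the wire as
//   00 05 03 <5 payload bytes> 00 00 00   (11 bytes total).
// After kFirstPaddings chunks both directions fall back to raw relaying.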
NaiveConnection::NaiveConnection(
unsigned int id,
ClientProtocol protocol,
std::unique_ptr<PaddingDetectorDelegate> padding_detector_delegate,
const ProxyInfo& proxy_info,
const SSLConfig& server_ssl_config,
const SSLConfig& proxy_ssl_config,
RedirectResolver* resolver,
HttpNetworkSession* session,
const NetworkIsolationKey& network_isolation_key,
const NetLogWithSource& net_log,
std::unique_ptr<StreamSocket> accepted_socket,
const NetworkTrafficAnnotationTag& traffic_annotation)
: id_(id),
protocol_(protocol),
padding_detector_delegate_(std::move(padding_detector_delegate)),
proxy_info_(proxy_info),
server_ssl_config_(server_ssl_config),
proxy_ssl_config_(proxy_ssl_config),
resolver_(resolver),
session_(session),
network_isolation_key_(network_isolation_key),
net_log_(net_log),
next_state_(STATE_NONE),
client_socket_(std::move(accepted_socket)),
server_socket_handle_(std::make_unique<ClientSocketHandle>()),
sockets_{client_socket_.get(), nullptr},
errors_{OK, OK},
write_pending_{false, false},
early_pull_pending_(false),
can_push_to_server_(false),
early_pull_result_(ERR_IO_PENDING),
num_paddings_{0, 0},
read_padding_state_(STATE_READ_PAYLOAD_LENGTH_1),
full_duplex_(false),
time_func_(&base::TimeTicks::Now),
traffic_annotation_(traffic_annotation) {
io_callback_ = base::BindRepeating(&NaiveConnection::OnIOComplete,
weak_ptr_factory_.GetWeakPtr());
}
NaiveConnection::~NaiveConnection() {
Disconnect();
}
int NaiveConnection::Connect(CompletionOnceCallback callback) {
DCHECK(client_socket_);
DCHECK_EQ(next_state_, STATE_NONE);
DCHECK(!connect_callback_);
if (full_duplex_)
return OK;
next_state_ = STATE_CONNECT_CLIENT;
int rv = DoLoop(OK);
if (rv == ERR_IO_PENDING) {
connect_callback_ = std::move(callback);
}
return rv;
}
void NaiveConnection::Disconnect() {
full_duplex_ = false;
// Closes server side first because latency is higher.
if (server_socket_handle_->socket())
server_socket_handle_->socket()->Disconnect();
client_socket_->Disconnect();
next_state_ = STATE_NONE;
connect_callback_.Reset();
run_callback_.Reset();
}
void NaiveConnection::DoCallback(int result) {
DCHECK_NE(result, ERR_IO_PENDING);
DCHECK(connect_callback_);
// Since Run() may result in Read being called,
// clear connect_callback_ up front.
std::move(connect_callback_).Run(result);
}
void NaiveConnection::OnIOComplete(int result) {
DCHECK_NE(next_state_, STATE_NONE);
int rv = DoLoop(result);
if (rv != ERR_IO_PENDING) {
DoCallback(rv);
}
}
int NaiveConnection::DoLoop(int last_io_result) {
DCHECK_NE(next_state_, STATE_NONE);
int rv = last_io_result;
do {
State state = next_state_;
next_state_ = STATE_NONE;
switch (state) {
case STATE_CONNECT_CLIENT:
DCHECK_EQ(rv, OK);
rv = DoConnectClient();
break;
case STATE_CONNECT_CLIENT_COMPLETE:
rv = DoConnectClientComplete(rv);
break;
case STATE_CONNECT_SERVER:
DCHECK_EQ(rv, OK);
rv = DoConnectServer();
break;
case STATE_CONNECT_SERVER_COMPLETE:
rv = DoConnectServerComplete(rv);
break;
default:
NOTREACHED() << "bad state";
rv = ERR_UNEXPECTED;
break;
}
} while (rv != ERR_IO_PENDING && next_state_ != STATE_NONE);
return rv;
}
int NaiveConnection::DoConnectClient() {
next_state_ = STATE_CONNECT_CLIENT_COMPLETE;
return client_socket_->Connect(io_callback_);
}
int NaiveConnection::DoConnectClientComplete(int result) {
if (result < 0)
return result;
// For proxy client sockets, padding support detection is finished after the
// first server response which means there will be one missed early pull. For
// proxy server sockets (HttpProxySocket), padding support detection is
// done during client connect, so there shouldn't be any missed early pull.
if (!padding_detector_delegate_->IsPaddingSupportKnown()) {
early_pull_pending_ = false;
early_pull_result_ = 0;
next_state_ = STATE_CONNECT_SERVER;
return OK;
}
early_pull_pending_ = true;
Pull(kClient, kServer);
if (early_pull_result_ != ERR_IO_PENDING) {
// Pull has completed synchronously.
if (early_pull_result_ <= 0) {
return early_pull_result_ ? early_pull_result_ : ERR_CONNECTION_CLOSED;
}
}
next_state_ = STATE_CONNECT_SERVER;
return OK;
}
int NaiveConnection::DoConnectServer() {
next_state_ = STATE_CONNECT_SERVER_COMPLETE;
HostPortPair origin;
if (protocol_ == ClientProtocol::kSocks5) {
const auto* socket =
static_cast<const Socks5ServerSocket*>(client_socket_.get());
origin = socket->request_endpoint();
} else if (protocol_ == ClientProtocol::kHttp) {
const auto* socket =
static_cast<const HttpProxySocket*>(client_socket_.get());
origin = socket->request_endpoint();
} else if (protocol_ == ClientProtocol::kRedir) {
#if defined(OS_LINUX)
const auto* socket =
static_cast<const TCPClientSocket*>(client_socket_.get());
int sd = socket->SocketDescriptorForTesting();
SockaddrStorage dst;
int rv;
rv = getsockopt(sd, SOL_IP, SO_ORIGINAL_DST, dst.addr, &dst.addr_len);
if (rv == 0) {
IPEndPoint ipe;
if (ipe.FromSockAddr(dst.addr, dst.addr_len)) {
const auto& addr = ipe.address();
auto name = resolver_->FindNameByAddress(addr);
if (!name.empty()) {
origin = HostPortPair(name, ipe.port());
} else if (!resolver_->IsInResolvedRange(addr)) {
origin = HostPortPair::FromIPEndPoint(ipe);
} else {
LOG(ERROR) << "Connection " << id_ << " to unresolved name for "
<< addr.ToString();
return ERR_ADDRESS_INVALID;
}
}
}
#else
static_cast<void>(resolver_);
#endif
}
if (origin.IsEmpty()) {
LOG(ERROR) << "Connection " << id_ << " to invalid origin";
return ERR_ADDRESS_INVALID;
}
LOG(INFO) << "Connection " << id_ << " to " << origin.ToString();
// Ignores socket limit set by socket pool for this type of socket.
return InitSocketHandleForRawConnect2(
origin, session_, LOAD_IGNORE_LIMITS, MAXIMUM_PRIORITY, proxy_info_,
server_ssl_config_, proxy_ssl_config_, PRIVACY_MODE_DISABLED,
network_isolation_key_, net_log_, server_socket_handle_.get(),
io_callback_);
}
int NaiveConnection::DoConnectServerComplete(int result) {
if (result < 0)
return result;
DCHECK(server_socket_handle_->socket());
sockets_[kServer] = server_socket_handle_->socket();
full_duplex_ = true;
next_state_ = STATE_NONE;
return OK;
}
int NaiveConnection::Run(CompletionOnceCallback callback) {
DCHECK(sockets_[kClient]);
DCHECK(sockets_[kServer]);
DCHECK_EQ(next_state_, STATE_NONE);
DCHECK(!connect_callback_);
if (errors_[kClient] != OK)
return errors_[kClient];
if (errors_[kServer] != OK)
return errors_[kServer];
run_callback_ = std::move(callback);
bytes_passed_without_yielding_[kClient] = 0;
bytes_passed_without_yielding_[kServer] = 0;
yield_after_time_[kClient] =
time_func_() + base::Milliseconds(kYieldAfterDurationMilliseconds);
yield_after_time_[kServer] = yield_after_time_[kClient];
can_push_to_server_ = true;
// early_pull_result_ == 0 means the early pull was not started because
// padding support was not yet known.
if (!early_pull_pending_ && early_pull_result_ == 0) {
Pull(kClient, kServer);
} else if (!early_pull_pending_) {
DCHECK_GT(early_pull_result_, 0);
Push(kClient, kServer, early_pull_result_);
}
Pull(kServer, kClient);
return ERR_IO_PENDING;
}
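// From here on the connection is a symmetric relay: each direction repeatedly
// Pull()s from one socket and Push()es the (possibly re-framed) data to the
// other, yielding back to the task runner after kYieldAfterBytesRead bytes or
// kYieldAfterDurationMilliseconds so other connections stay responsive.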
void NaiveConnection::Pull(Direction from, Direction to) {
if (errors_[kClient] < 0 || errors_[kServer] < 0)
return;
int read_size = kBufferSize;
auto padding_direction = padding_detector_delegate_->GetPaddingDirection();
if (from == padding_direction && num_paddings_[from] < kFirstPaddings) {
auto buffer = base::MakeRefCounted<GrowableIOBuffer>();
buffer->SetCapacity(kBufferSize);
buffer->set_offset(kPaddingHeaderSize);
read_buffers_[from] = buffer;
read_size = kBufferSize - kPaddingHeaderSize - kMaxPaddingSize;
} else {
read_buffers_[from] = base::MakeRefCounted<IOBuffer>(kBufferSize);
}
DCHECK(sockets_[from]);
int rv = sockets_[from]->Read(
read_buffers_[from].get(), read_size,
base::BindRepeating(&NaiveConnection::OnPullComplete,
weak_ptr_factory_.GetWeakPtr(), from, to));
if (from == kClient && early_pull_pending_)
early_pull_result_ = rv;
if (rv != ERR_IO_PENDING)
OnPullComplete(from, to, rv);
}
void NaiveConnection::Push(Direction from, Direction to, int size) {
int write_size = size;
int write_offset = 0;
auto padding_direction = padding_detector_delegate_->GetPaddingDirection();
if (from == padding_direction && num_paddings_[from] < kFirstPaddings) {
// Adds padding.
++num_paddings_[from];
int padding_size = base::RandInt(0, kMaxPaddingSize);
auto* buffer = static_cast<GrowableIOBuffer*>(read_buffers_[from].get());
buffer->set_offset(0);
uint8_t* p = reinterpret_cast<uint8_t*>(buffer->data());
p[0] = size / 256;
p[1] = size % 256;
p[2] = padding_size;
std::memset(p + kPaddingHeaderSize + size, 0, padding_size);
write_size = kPaddingHeaderSize + size + padding_size;
} else if (to == padding_direction && num_paddings_[from] < kFirstPaddings) {
// Removes padding.
const char* p = read_buffers_[from]->data();
bool trivial_padding = false;
if (read_padding_state_ == STATE_READ_PAYLOAD_LENGTH_1 &&
size >= kPaddingHeaderSize) {
int payload_size =
static_cast<uint8_t>(p[0]) * 256 + static_cast<uint8_t>(p[1]);
int padding_size = static_cast<uint8_t>(p[2]);
if (size == kPaddingHeaderSize + payload_size + padding_size) {
write_size = payload_size;
write_offset = kPaddingHeaderSize;
++num_paddings_[from];
trivial_padding = true;
}
}
if (!trivial_padding) {
auto unpadded_buffer = base::MakeRefCounted<IOBuffer>(kBufferSize);
char* unpadded_ptr = unpadded_buffer->data();
for (int i = 0; i < size;) {
if (num_paddings_[from] >= kFirstPaddings &&
read_padding_state_ == STATE_READ_PAYLOAD_LENGTH_1) {
std::memcpy(unpadded_ptr, p + i, size - i);
unpadded_ptr += size - i;
break;
}
int copy_size;
switch (read_padding_state_) {
case STATE_READ_PAYLOAD_LENGTH_1:
payload_length_ = static_cast<uint8_t>(p[i]);
++i;
read_padding_state_ = STATE_READ_PAYLOAD_LENGTH_2;
break;
case STATE_READ_PAYLOAD_LENGTH_2:
payload_length_ =
payload_length_ * 256 + static_cast<uint8_t>(p[i]);
++i;
read_padding_state_ = STATE_READ_PADDING_LENGTH;
break;
case STATE_READ_PADDING_LENGTH:
padding_length_ = static_cast<uint8_t>(p[i]);
++i;
read_padding_state_ = STATE_READ_PAYLOAD;
break;
case STATE_READ_PAYLOAD:
if (payload_length_ <= size - i) {
copy_size = payload_length_;
read_padding_state_ = STATE_READ_PADDING;
} else {
copy_size = size - i;
}
std::memcpy(unpadded_ptr, p + i, copy_size);
unpadded_ptr += copy_size;
i += copy_size;
payload_length_ -= copy_size;
break;
case STATE_READ_PADDING:
if (padding_length_ <= size - i) {
copy_size = padding_length_;
read_padding_state_ = STATE_READ_PAYLOAD_LENGTH_1;
++num_paddings_[from];
} else {
copy_size = size - i;
}
i += copy_size;
padding_length_ -= copy_size;
break;
}
}
write_size = unpadded_ptr - unpadded_buffer->data();
read_buffers_[from] = unpadded_buffer;
}
if (write_size == 0) {
OnPushComplete(from, to, OK);
return;
}
}
write_buffers_[to] = base::MakeRefCounted<DrainableIOBuffer>(
std::move(read_buffers_[from]), write_offset + write_size);
if (write_offset) {
write_buffers_[to]->DidConsume(write_offset);
}
write_pending_[to] = true;
DCHECK(sockets_[to]);
int rv = sockets_[to]->Write(
write_buffers_[to].get(), write_size,
base::BindRepeating(&NaiveConnection::OnPushComplete,
weak_ptr_factory_.GetWeakPtr(), from, to),
traffic_annotation_);
if (rv != ERR_IO_PENDING)
OnPushComplete(from, to, rv);
}
void NaiveConnection::Disconnect(Direction side) {
if (sockets_[side]) {
sockets_[side]->Disconnect();
sockets_[side] = nullptr;
write_pending_[side] = false;
}
}
bool NaiveConnection::IsConnected(Direction side) {
return sockets_[side];
}
void NaiveConnection::OnBothDisconnected() {
if (run_callback_) {
int error = OK;
if (errors_[kClient] != ERR_CONNECTION_CLOSED && errors_[kClient] < 0)
error = errors_[kClient];
if (errors_[kServer] != ERR_CONNECTION_CLOSED && errors_[kServer] < 0)
error = errors_[kServer];
std::move(run_callback_).Run(error);
}
}
void NaiveConnection::OnPullError(Direction from, Direction to, int error) {
DCHECK_LT(error, 0);
errors_[from] = error;
Disconnect(from);
if (!write_pending_[to])
Disconnect(to);
if (!IsConnected(from) && !IsConnected(to))
OnBothDisconnected();
}
void NaiveConnection::OnPushError(Direction from, Direction to, int error) {
DCHECK_LE(error, 0);
DCHECK(!write_pending_[to]);
if (error < 0) {
errors_[to] = error;
Disconnect(kServer);
Disconnect(kClient);
} else if (!IsConnected(from)) {
Disconnect(to);
}
if (!IsConnected(from) && !IsConnected(to))
OnBothDisconnected();
}
void NaiveConnection::OnPullComplete(Direction from, Direction to, int result) {
if (from == kClient && early_pull_pending_) {
early_pull_pending_ = false;
early_pull_result_ = result ? result : ERR_CONNECTION_CLOSED;
}
if (result <= 0) {
OnPullError(from, to, result ? result : ERR_CONNECTION_CLOSED);
return;
}
if (from == kClient && !can_push_to_server_)
return;
Push(from, to, result);
}
void NaiveConnection::OnPushComplete(Direction from, Direction to, int result) {
if (result >= 0 && write_buffers_[to] != nullptr) {
bytes_passed_without_yielding_[from] += result;
write_buffers_[to]->DidConsume(result);
int size = write_buffers_[to]->BytesRemaining();
if (size > 0) {
int rv = sockets_[to]->Write(
write_buffers_[to].get(), size,
base::BindRepeating(&NaiveConnection::OnPushComplete,
weak_ptr_factory_.GetWeakPtr(), from, to),
traffic_annotation_);
if (rv != ERR_IO_PENDING)
OnPushComplete(from, to, rv);
return;
}
}
write_pending_[to] = false;
// Checks for termination even if result is OK.
OnPushError(from, to, result >= 0 ? OK : result);
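  // After pushing kYieldAfterBytesRead bytes or running past the yield
  // deadline, post the next Pull() as a task instead of calling it directly,
  // so a single busy connection does not monopolize the message loop.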
if (bytes_passed_without_yielding_[from] > kYieldAfterBytesRead ||
time_func_() > yield_after_time_[from]) {
bytes_passed_without_yielding_[from] = 0;
yield_after_time_[from] =
time_func_() + base::Milliseconds(kYieldAfterDurationMilliseconds);
base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
base::BindRepeating(&NaiveConnection::Pull,
weak_ptr_factory_.GetWeakPtr(), from, to));
} else {
Pull(from, to);
}
}
} // namespace net


@ -0,0 +1,142 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Copyright 2018 klzgrad <kizdiv@gmail.com>. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef NET_TOOLS_NAIVE_NAIVE_CONNECTION_H_
#define NET_TOOLS_NAIVE_NAIVE_CONNECTION_H_
#include <memory>
#include <string>
#include "base/memory/scoped_refptr.h"
#include "base/memory/weak_ptr.h"
#include "base/time/time.h"
#include "net/base/completion_once_callback.h"
#include "net/base/completion_repeating_callback.h"
#include "net/tools/naive/naive_protocol.h"
#include "net/tools/naive/naive_proxy_delegate.h"
namespace net {
class ClientSocketHandle;
class DrainableIOBuffer;
class HttpNetworkSession;
class IOBuffer;
class NetLogWithSource;
class ProxyInfo;
class StreamSocket;
struct NetworkTrafficAnnotationTag;
struct SSLConfig;
class RedirectResolver;
class NetworkIsolationKey;
class NaiveConnection {
public:
using TimeFunc = base::TimeTicks (*)();
NaiveConnection(
unsigned int id,
ClientProtocol protocol,
std::unique_ptr<PaddingDetectorDelegate> padding_detector_delegate,
const ProxyInfo& proxy_info,
const SSLConfig& server_ssl_config,
const SSLConfig& proxy_ssl_config,
RedirectResolver* resolver,
HttpNetworkSession* session,
const NetworkIsolationKey& network_isolation_key,
const NetLogWithSource& net_log,
std::unique_ptr<StreamSocket> accepted_socket,
const NetworkTrafficAnnotationTag& traffic_annotation);
~NaiveConnection();
NaiveConnection(const NaiveConnection&) = delete;
NaiveConnection& operator=(const NaiveConnection&) = delete;
unsigned int id() const { return id_; }
int Connect(CompletionOnceCallback callback);
void Disconnect();
int Run(CompletionOnceCallback callback);
private:
enum State {
STATE_CONNECT_CLIENT,
STATE_CONNECT_CLIENT_COMPLETE,
STATE_CONNECT_SERVER,
STATE_CONNECT_SERVER_COMPLETE,
STATE_NONE,
};
enum PaddingState {
STATE_READ_PAYLOAD_LENGTH_1,
STATE_READ_PAYLOAD_LENGTH_2,
STATE_READ_PADDING_LENGTH,
STATE_READ_PAYLOAD,
STATE_READ_PADDING,
};
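  // Wire format of a padded frame (applies to the first kFirstPaddings frames
  // in the padding direction): 2-byte big-endian payload length, 1-byte
  // padding length, payload bytes, then zero padding. PaddingState tracks
  // progress through these fields when stripping padding.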
void DoCallback(int result);
void OnIOComplete(int result);
int DoLoop(int last_io_result);
int DoConnectClient();
int DoConnectClientComplete(int result);
int DoConnectServer();
int DoConnectServerComplete(int result);
void Pull(Direction from, Direction to);
void Push(Direction from, Direction to, int size);
void Disconnect(Direction side);
bool IsConnected(Direction side);
void OnBothDisconnected();
void OnPullError(Direction from, Direction to, int error);
void OnPushError(Direction from, Direction to, int error);
void OnPullComplete(Direction from, Direction to, int result);
void OnPushComplete(Direction from, Direction to, int result);
unsigned int id_;
ClientProtocol protocol_;
std::unique_ptr<PaddingDetectorDelegate> padding_detector_delegate_;
const ProxyInfo& proxy_info_;
const SSLConfig& server_ssl_config_;
const SSLConfig& proxy_ssl_config_;
RedirectResolver* resolver_;
HttpNetworkSession* session_;
const NetworkIsolationKey& network_isolation_key_;
const NetLogWithSource& net_log_;
CompletionRepeatingCallback io_callback_;
CompletionOnceCallback connect_callback_;
CompletionOnceCallback run_callback_;
State next_state_;
std::unique_ptr<StreamSocket> client_socket_;
std::unique_ptr<ClientSocketHandle> server_socket_handle_;
StreamSocket* sockets_[kNumDirections];
scoped_refptr<IOBuffer> read_buffers_[kNumDirections];
scoped_refptr<DrainableIOBuffer> write_buffers_[kNumDirections];
int errors_[kNumDirections];
bool write_pending_[kNumDirections];
int bytes_passed_without_yielding_[kNumDirections];
base::TimeTicks yield_after_time_[kNumDirections];
bool early_pull_pending_;
bool can_push_to_server_;
int early_pull_result_;
int num_paddings_[kNumDirections];
PaddingState read_padding_state_;
int payload_length_;
int padding_length_;
bool full_duplex_;
TimeFunc time_func_;
// Traffic annotation for socket control.
const NetworkTrafficAnnotationTag& traffic_annotation_;
base::WeakPtrFactory<NaiveConnection> weak_ptr_factory_{this};
};
} // namespace net
#endif // NET_TOOLS_NAIVE_NAIVE_CONNECTION_H_


@ -0,0 +1,24 @@
// Copyright 2020 klzgrad <kizdiv@gmail.com>. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef NET_TOOLS_NAIVE_NAIVE_PROTOCOL_H_
#define NET_TOOLS_NAIVE_NAIVE_PROTOCOL_H_
namespace net {
enum class ClientProtocol {
kSocks5,
kHttp,
kRedir,
};
// When a Direction value is used as the padding direction, padding is added
// to traffic coming from that direction and removed from traffic going
// toward it.
enum Direction {
kClient = 0,
kServer = 1,
kNumDirections = 2,
kNone = 2,
};
} // namespace net
#endif // NET_TOOLS_NAIVE_NAIVE_PROTOCOL_H_


@ -0,0 +1,211 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Copyright 2018 klzgrad <kizdiv@gmail.com>. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "net/tools/naive/naive_proxy.h"
#include <utility>
#include "base/bind.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/threading/thread_task_runner_handle.h"
#include "net/base/load_flags.h"
#include "net/base/net_errors.h"
#include "net/http/http_network_session.h"
#include "net/proxy_resolution/configured_proxy_resolution_service.h"
#include "net/proxy_resolution/proxy_config.h"
#include "net/proxy_resolution/proxy_list.h"
#include "net/socket/client_socket_pool_manager.h"
#include "net/socket/server_socket.h"
#include "net/socket/stream_socket.h"
#include "net/tools/naive/http_proxy_socket.h"
#include "net/tools/naive/naive_proxy_delegate.h"
#include "net/tools/naive/socks5_server_socket.h"
namespace net {
NaiveProxy::NaiveProxy(std::unique_ptr<ServerSocket> listen_socket,
ClientProtocol protocol,
const std::string& listen_user,
const std::string& listen_pass,
int concurrency,
RedirectResolver* resolver,
HttpNetworkSession* session,
const NetworkTrafficAnnotationTag& traffic_annotation)
: listen_socket_(std::move(listen_socket)),
protocol_(protocol),
listen_user_(listen_user),
listen_pass_(listen_pass),
concurrency_(concurrency),
resolver_(resolver),
session_(session),
net_log_(
NetLogWithSource::Make(session->net_log(), NetLogSourceType::NONE)),
last_id_(0),
traffic_annotation_(traffic_annotation) {
const auto& proxy_config = static_cast<ConfiguredProxyResolutionService*>(
session_->proxy_resolution_service())
->config();
DCHECK(proxy_config);
const ProxyList& proxy_list =
proxy_config.value().value().proxy_rules().single_proxies;
DCHECK(!proxy_list.IsEmpty());
proxy_info_.UseProxyList(proxy_list);
proxy_info_.set_traffic_annotation(
net::MutableNetworkTrafficAnnotationTag(traffic_annotation_));
// See HttpStreamFactory::Job::DoInitConnectionImpl()
proxy_ssl_config_.disable_cert_verification_network_fetches = true;
server_ssl_config_.alpn_protos = session_->GetAlpnProtos();
proxy_ssl_config_.alpn_protos = session_->GetAlpnProtos();
server_ssl_config_.application_settings = session_->GetApplicationSettings();
proxy_ssl_config_.application_settings = session_->GetApplicationSettings();
server_ssl_config_.ignore_certificate_errors =
session_->params().ignore_certificate_errors;
proxy_ssl_config_.ignore_certificate_errors =
session_->params().ignore_certificate_errors;
// TODO(https://crbug.com/964642): Also enable 0-RTT for TLS proxies.
server_ssl_config_.early_data_enabled = session_->params().enable_early_data;
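  // One transient NetworkIsolationKey per concurrency slot; DoConnect()
  // assigns them to connections round-robin (last_id_ % concurrency_).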
for (int i = 0; i < concurrency_; i++) {
network_isolation_keys_.push_back(NetworkIsolationKey::CreateTransient());
}
DCHECK(listen_socket_);
  // Start accepting connections in the next run loop, in case the delegate is
  // not yet ready to receive callbacks.
base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, base::BindOnce(&NaiveProxy::DoAcceptLoop,
weak_ptr_factory_.GetWeakPtr()));
}
NaiveProxy::~NaiveProxy() = default;
void NaiveProxy::DoAcceptLoop() {
int result;
do {
result = listen_socket_->Accept(
&accepted_socket_, base::BindRepeating(&NaiveProxy::OnAcceptComplete,
weak_ptr_factory_.GetWeakPtr()));
if (result == ERR_IO_PENDING)
return;
HandleAcceptResult(result);
} while (result == OK);
}
void NaiveProxy::OnAcceptComplete(int result) {
HandleAcceptResult(result);
if (result == OK)
DoAcceptLoop();
}
void NaiveProxy::HandleAcceptResult(int result) {
if (result != OK) {
LOG(ERROR) << "Accept error: rv=" << result;
return;
}
DoConnect();
}
void NaiveProxy::DoConnect() {
std::unique_ptr<StreamSocket> socket;
auto* proxy_delegate =
static_cast<NaiveProxyDelegate*>(session_->context().proxy_delegate);
DCHECK(proxy_delegate);
DCHECK(!proxy_info_.is_empty());
const auto& proxy_server = proxy_info_.proxy_server();
auto padding_detector_delegate = std::make_unique<PaddingDetectorDelegate>(
proxy_delegate, proxy_server, protocol_);
if (protocol_ == ClientProtocol::kSocks5) {
socket = std::make_unique<Socks5ServerSocket>(std::move(accepted_socket_),
listen_user_, listen_pass_,
traffic_annotation_);
} else if (protocol_ == ClientProtocol::kHttp) {
socket = std::make_unique<HttpProxySocket>(std::move(accepted_socket_),
padding_detector_delegate.get(),
traffic_annotation_);
} else if (protocol_ == ClientProtocol::kRedir) {
socket = std::move(accepted_socket_);
} else {
return;
}
last_id_++;
const auto& nik = network_isolation_keys_[last_id_ % concurrency_];
auto connection_ptr = std::make_unique<NaiveConnection>(
last_id_, protocol_, std::move(padding_detector_delegate), proxy_info_,
server_ssl_config_, proxy_ssl_config_, resolver_, session_, nik, net_log_,
std::move(socket), traffic_annotation_);
auto* connection = connection_ptr.get();
connection_by_id_[connection->id()] = std::move(connection_ptr);
int result = connection->Connect(
base::BindRepeating(&NaiveProxy::OnConnectComplete,
weak_ptr_factory_.GetWeakPtr(), connection->id()));
if (result == ERR_IO_PENDING)
return;
HandleConnectResult(connection, result);
}
void NaiveProxy::OnConnectComplete(unsigned int connection_id, int result) {
auto* connection = FindConnection(connection_id);
if (!connection)
return;
HandleConnectResult(connection, result);
}
void NaiveProxy::HandleConnectResult(NaiveConnection* connection, int result) {
if (result != OK) {
Close(connection->id(), result);
return;
}
DoRun(connection);
}
void NaiveProxy::DoRun(NaiveConnection* connection) {
int result = connection->Run(
base::BindRepeating(&NaiveProxy::OnRunComplete,
weak_ptr_factory_.GetWeakPtr(), connection->id()));
if (result == ERR_IO_PENDING)
return;
HandleRunResult(connection, result);
}
void NaiveProxy::OnRunComplete(unsigned int connection_id, int result) {
auto* connection = FindConnection(connection_id);
if (!connection)
return;
HandleRunResult(connection, result);
}
void NaiveProxy::HandleRunResult(NaiveConnection* connection, int result) {
Close(connection->id(), result);
}
void NaiveProxy::Close(unsigned int connection_id, int reason) {
auto it = connection_by_id_.find(connection_id);
if (it == connection_by_id_.end())
return;
LOG(INFO) << "Connection " << connection_id
<< " closed: " << ErrorToShortString(reason);
  // Callbacks on the current call stack may still hold a pointer to this
  // connection. Rather than referencing the connection by ID everywhere,
  // destroy it in the next run loop so that any pending callbacks on the
  // stack can return first.
base::ThreadTaskRunnerHandle::Get()->DeleteSoon(FROM_HERE,
std::move(it->second));
connection_by_id_.erase(it);
}
NaiveConnection* NaiveProxy::FindConnection(unsigned int connection_id) {
auto it = connection_by_id_.find(connection_id);
if (it == connection_by_id_.end())
return nullptr;
return it->second.get();
}
} // namespace net


@ -0,0 +1,89 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Copyright 2018 klzgrad <kizdiv@gmail.com>. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef NET_TOOLS_NAIVE_NAIVE_PROXY_H_
#define NET_TOOLS_NAIVE_NAIVE_PROXY_H_
#include <map>
#include <memory>
#include <vector>
#include "base/memory/weak_ptr.h"
#include "net/base/completion_repeating_callback.h"
#include "net/base/network_isolation_key.h"
#include "net/log/net_log_with_source.h"
#include "net/proxy_resolution/proxy_info.h"
#include "net/ssl/ssl_config.h"
#include "net/tools/naive/naive_connection.h"
#include "net/tools/naive/naive_protocol.h"
namespace net {
class ClientSocketHandle;
class HttpNetworkSession;
class NaiveConnection;
class ServerSocket;
class StreamSocket;
struct NetworkTrafficAnnotationTag;
class RedirectResolver;
class NaiveProxy {
public:
NaiveProxy(std::unique_ptr<ServerSocket> server_socket,
ClientProtocol protocol,
const std::string& listen_user,
const std::string& listen_pass,
int concurrency,
RedirectResolver* resolver,
HttpNetworkSession* session,
const NetworkTrafficAnnotationTag& traffic_annotation);
~NaiveProxy();
NaiveProxy(const NaiveProxy&) = delete;
NaiveProxy& operator=(const NaiveProxy&) = delete;
private:
void DoAcceptLoop();
void OnAcceptComplete(int result);
void HandleAcceptResult(int result);
void DoConnect();
void OnConnectComplete(unsigned int connection_id, int result);
void HandleConnectResult(NaiveConnection* connection, int result);
void DoRun(NaiveConnection* connection);
void OnRunComplete(unsigned int connection_id, int result);
void HandleRunResult(NaiveConnection* connection, int result);
void Close(unsigned int connection_id, int reason);
NaiveConnection* FindConnection(unsigned int connection_id);
std::unique_ptr<ServerSocket> listen_socket_;
ClientProtocol protocol_;
std::string listen_user_;
std::string listen_pass_;
int concurrency_;
ProxyInfo proxy_info_;
SSLConfig server_ssl_config_;
SSLConfig proxy_ssl_config_;
RedirectResolver* resolver_;
HttpNetworkSession* session_;
NetLogWithSource net_log_;
unsigned int last_id_;
std::unique_ptr<StreamSocket> accepted_socket_;
std::vector<NetworkIsolationKey> network_isolation_keys_;
std::map<unsigned int, std::unique_ptr<NaiveConnection>> connection_by_id_;
const NetworkTrafficAnnotationTag& traffic_annotation_;
base::WeakPtrFactory<NaiveProxy> weak_ptr_factory_{this};
};
} // namespace net
#endif // NET_TOOLS_NAIVE_NAIVE_PROXY_H_


@ -0,0 +1,592 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Copyright 2018 klzgrad <kizdiv@gmail.com>. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <cstdlib>
#include <iostream>
#include <limits>
#include <memory>
#include <string>
#include "base/at_exit.h"
#include "base/command_line.h"
#include "base/feature_list.h"
#include "base/files/file_path.h"
#include "base/json/json_file_value_serializer.h"
#include "base/json/json_writer.h"
#include "base/logging.h"
#include "base/rand_util.h"
#include "base/run_loop.h"
#include "base/strings/escape.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
#include "base/system/sys_info.h"
#include "base/task/single_thread_task_executor.h"
#include "base/task/thread_pool/thread_pool_instance.h"
#include "base/values.h"
#include "build/build_config.h"
#include "components/version_info/version_info.h"
#include "net/base/auth.h"
#include "net/base/network_isolation_key.h"
#include "net/base/url_util.h"
#include "net/cert/cert_verifier.h"
#include "net/cert_net/cert_net_fetcher_url_request.h"
#include "net/dns/host_resolver.h"
#include "net/dns/mapped_host_resolver.h"
#include "net/http/http_auth.h"
#include "net/http/http_auth_cache.h"
#include "net/http/http_network_session.h"
#include "net/http/http_request_headers.h"
#include "net/http/http_transaction_factory.h"
#include "net/log/file_net_log_observer.h"
#include "net/log/net_log.h"
#include "net/log/net_log_capture_mode.h"
#include "net/log/net_log_entry.h"
#include "net/log/net_log_event_type.h"
#include "net/log/net_log_source.h"
#include "net/log/net_log_util.h"
#include "net/proxy_resolution/configured_proxy_resolution_service.h"
#include "net/proxy_resolution/proxy_config.h"
#include "net/proxy_resolution/proxy_config_service_fixed.h"
#include "net/proxy_resolution/proxy_config_with_annotation.h"
#include "net/socket/client_socket_pool_manager.h"
#include "net/socket/ssl_client_socket.h"
#include "net/socket/tcp_server_socket.h"
#include "net/socket/udp_server_socket.h"
#include "net/ssl/ssl_key_logger_impl.h"
#include "net/third_party/quiche/src/quic/core/quic_versions.h"
#include "net/tools/naive/naive_protocol.h"
#include "net/tools/naive/naive_proxy.h"
#include "net/tools/naive/naive_proxy_delegate.h"
#include "net/tools/naive/redirect_resolver.h"
#include "net/traffic_annotation/network_traffic_annotation.h"
#include "net/url_request/url_request_context.h"
#include "net/url_request/url_request_context_builder.h"
#include "url/gurl.h"
#include "url/scheme_host_port.h"
#include "url/url_util.h"
#if defined(OS_MACOSX)
#include "base/mac/scoped_nsautorelease_pool.h"
#endif
namespace {
constexpr int kListenBackLog = 512;
constexpr int kDefaultMaxSocketsPerPool = 256;
constexpr int kDefaultMaxSocketsPerGroup = 255;
constexpr int kExpectedMaxUsers = 8;
constexpr net::NetworkTrafficAnnotationTag kTrafficAnnotation =
net::DefineNetworkTrafficAnnotation("naive", "");
struct CommandLine {
std::string listen;
std::string proxy;
std::string concurrency;
std::string extra_headers;
std::string host_resolver_rules;
std::string resolver_range;
bool no_log;
base::FilePath log;
base::FilePath log_net_log;
base::FilePath ssl_key_log_file;
};
struct Params {
net::ClientProtocol protocol;
std::string listen_user;
std::string listen_pass;
std::string listen_addr;
int listen_port;
int concurrency;
net::HttpRequestHeaders extra_headers;
std::string proxy_url;
std::u16string proxy_user;
std::u16string proxy_pass;
std::string host_resolver_rules;
net::IPAddress resolver_range;
size_t resolver_prefix;
logging::LoggingSettings log_settings;
base::FilePath net_log_path;
base::FilePath ssl_key_path;
};
std::unique_ptr<base::Value> GetConstants() {
auto constants_dict = std::make_unique<base::Value>(net::GetNetConstants());
base::DictionaryValue dict;
std::string os_type = base::StringPrintf(
"%s: %s (%s)", base::SysInfo::OperatingSystemName().c_str(),
base::SysInfo::OperatingSystemVersion().c_str(),
base::SysInfo::OperatingSystemArchitecture().c_str());
dict.SetStringPath("os_type", os_type);
constants_dict->SetKey("clientInfo", std::move(dict));
return constants_dict;
}
void GetCommandLine(const base::CommandLine& proc, CommandLine* cmdline) {
if (proc.HasSwitch("h") || proc.HasSwitch("help")) {
std::cout << "Usage: naive { OPTIONS | config.json }\n"
"\n"
"Options:\n"
"-h, --help Show this message\n"
"--version Print version\n"
"--listen=<proto>://[addr][:port]\n"
" proto: socks, http\n"
" redir (Linux only)\n"
"--proxy=<proto>://[<user>:<pass>@]<hostname>[:<port>]\n"
" proto: https, quic\n"
"--insecure-concurrency=<N> Use N connections, insecure\n"
"--extra-headers=... Extra headers split by CRLF\n"
"--host-resolver-rules=... Resolver rules\n"
"--resolver-range=... Redirect resolver range\n"
"--log[=<path>] Log to stderr, or file\n"
"--log-net-log=<path> Save NetLog\n"
"--ssl-key-log-file=<path> Save SSL keys for Wireshark\n"
<< std::endl;
exit(EXIT_SUCCESS);
}
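  // Illustrative invocation (host name and credentials are placeholders):
  //   naive --listen=socks://127.0.0.1:1080 \
  //         --proxy=https://user:pass@proxy.example.org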
if (proc.HasSwitch("version")) {
std::cout << "naive " << version_info::GetVersionNumber() << std::endl;
exit(EXIT_SUCCESS);
}
cmdline->listen = proc.GetSwitchValueASCII("listen");
cmdline->proxy = proc.GetSwitchValueASCII("proxy");
cmdline->concurrency = proc.GetSwitchValueASCII("insecure-concurrency");
cmdline->extra_headers = proc.GetSwitchValueASCII("extra-headers");
cmdline->host_resolver_rules =
proc.GetSwitchValueASCII("host-resolver-rules");
cmdline->resolver_range = proc.GetSwitchValueASCII("resolver-range");
cmdline->no_log = !proc.HasSwitch("log");
cmdline->log = proc.GetSwitchValuePath("log");
cmdline->log_net_log = proc.GetSwitchValuePath("log-net-log");
cmdline->ssl_key_log_file = proc.GetSwitchValuePath("ssl-key-log-file");
}
void GetCommandLineFromConfig(const base::FilePath& config_path,
CommandLine* cmdline) {
JSONFileValueDeserializer reader(config_path);
int error_code;
std::string error_message;
auto value = reader.Deserialize(&error_code, &error_message);
if (value == nullptr) {
std::cerr << "Error reading " << config_path << ": (" << error_code << ") "
<< error_message << std::endl;
exit(EXIT_FAILURE);
}
if (!value->is_dict()) {
std::cerr << "Invalid config format" << std::endl;
exit(EXIT_FAILURE);
}
const auto* listen = value->FindStringKey("listen");
if (listen) {
cmdline->listen = *listen;
}
const auto* proxy = value->FindStringKey("proxy");
if (proxy) {
cmdline->proxy = *proxy;
}
const auto* concurrency = value->FindStringKey("insecure-concurrency");
if (concurrency) {
cmdline->concurrency = *concurrency;
}
const auto* extra_headers = value->FindStringKey("extra-headers");
if (extra_headers) {
cmdline->extra_headers = *extra_headers;
}
const auto* host_resolver_rules = value->FindStringKey("host-resolver-rules");
if (host_resolver_rules) {
cmdline->host_resolver_rules = *host_resolver_rules;
}
const auto* resolver_range = value->FindStringKey("resolver-range");
if (resolver_range) {
cmdline->resolver_range = *resolver_range;
}
cmdline->no_log = true;
const auto* log = value->FindStringKey("log");
if (log) {
cmdline->no_log = false;
cmdline->log = base::FilePath::FromUTF8Unsafe(*log);
}
const auto* log_net_log = value->FindStringKey("log-net-log");
if (log_net_log) {
cmdline->log_net_log = base::FilePath::FromUTF8Unsafe(*log_net_log);
}
const auto* ssl_key_log_file = value->FindStringKey("ssl-key-log-file");
if (ssl_key_log_file) {
cmdline->ssl_key_log_file =
base::FilePath::FromUTF8Unsafe(*ssl_key_log_file);
}
}
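// Illustrative config.json accepted by the parser above; all values,
// including the host name and credentials, are placeholders:
//   {
//     "listen": "socks://127.0.0.1:1080",
//     "proxy": "https://user:pass@proxy.example.org",
//     "log": ""
//   }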
std::string GetProxyFromURL(const GURL& url) {
std::string str = url.GetWithEmptyPath().spec();
if (str.size() && str.back() == '/') {
str.pop_back();
}
return str;
}
bool ParseCommandLine(const CommandLine& cmdline, Params* params) {
params->protocol = net::ClientProtocol::kSocks5;
params->listen_addr = "0.0.0.0";
params->listen_port = 1080;
url::AddStandardScheme("socks",
url::SCHEME_WITH_HOST_PORT_AND_USER_INFORMATION);
url::AddStandardScheme("redir", url::SCHEME_WITH_HOST_AND_PORT);
if (!cmdline.listen.empty()) {
GURL url(cmdline.listen);
if (url.scheme() == "socks") {
params->protocol = net::ClientProtocol::kSocks5;
params->listen_port = 1080;
} else if (url.scheme() == "http") {
params->protocol = net::ClientProtocol::kHttp;
params->listen_port = 8080;
} else if (url.scheme() == "redir") {
#if defined(OS_LINUX)
params->protocol = net::ClientProtocol::kRedir;
params->listen_port = 1080;
#else
std::cerr << "Redir protocol only supports Linux." << std::endl;
return false;
#endif
} else {
std::cerr << "Invalid scheme in --listen" << std::endl;
return false;
}
if (!url.username().empty()) {
params->listen_user = base::UnescapeBinaryURLComponent(url.username());
}
if (!url.password().empty()) {
params->listen_pass = base::UnescapeBinaryURLComponent(url.password());
}
if (!url.host().empty()) {
params->listen_addr = url.host();
}
if (!url.port().empty()) {
if (!base::StringToInt(url.port(), &params->listen_port)) {
std::cerr << "Invalid port in --listen" << std::endl;
return false;
}
if (params->listen_port <= 0 ||
params->listen_port > std::numeric_limits<uint16_t>::max()) {
std::cerr << "Invalid port in --listen" << std::endl;
return false;
}
}
}
params->proxy_url = "direct://";
GURL url(cmdline.proxy);
GURL::Replacements remove_auth;
remove_auth.ClearUsername();
remove_auth.ClearPassword();
GURL url_no_auth = url.ReplaceComponents(remove_auth);
if (!cmdline.proxy.empty()) {
if (!url.is_valid()) {
std::cerr << "Invalid proxy URL" << std::endl;
return false;
}
params->proxy_url = GetProxyFromURL(url_no_auth);
net::GetIdentityFromURL(url, &params->proxy_user, &params->proxy_pass);
}
if (!cmdline.concurrency.empty()) {
if (!base::StringToInt(cmdline.concurrency, &params->concurrency) ||
params->concurrency < 1) {
std::cerr << "Invalid concurrency" << std::endl;
return false;
}
} else {
params->concurrency = 1;
}
params->extra_headers.AddHeadersFromString(cmdline.extra_headers);
params->host_resolver_rules = cmdline.host_resolver_rules;
if (params->protocol == net::ClientProtocol::kRedir) {
std::string range = "100.64.0.0/10";
if (!cmdline.resolver_range.empty())
range = cmdline.resolver_range;
if (!net::ParseCIDRBlock(range, &params->resolver_range,
&params->resolver_prefix)) {
std::cerr << "Invalid resolver range" << std::endl;
return false;
}
if (params->resolver_range.IsIPv6()) {
std::cerr << "IPv6 resolver range not supported" << std::endl;
return false;
}
}
if (!cmdline.no_log) {
if (!cmdline.log.empty()) {
params->log_settings.logging_dest = logging::LOG_TO_FILE;
params->log_settings.log_file_path = cmdline.log.value().c_str();
} else {
params->log_settings.logging_dest = logging::LOG_TO_STDERR;
}
} else {
params->log_settings.logging_dest = logging::LOG_NONE;
}
params->net_log_path = cmdline.log_net_log;
params->ssl_key_path = cmdline.ssl_key_log_file;
return true;
}
} // namespace
namespace net {
namespace {
// NetLog::ThreadSafeObserver implementation that prints a selected subset of
// events (socket pool and HTTP/2 flow-control stalls) to the logs.
class PrintingLogObserver : public NetLog::ThreadSafeObserver {
public:
PrintingLogObserver() = default;
PrintingLogObserver(const PrintingLogObserver&) = delete;
PrintingLogObserver& operator=(const PrintingLogObserver&) = delete;
~PrintingLogObserver() override {
// This is guaranteed to be safe as this program is single threaded.
net_log()->RemoveObserver(this);
}
// NetLog::ThreadSafeObserver implementation:
void OnAddEntry(const NetLogEntry& entry) override {
switch (entry.type) {
case NetLogEventType::SOCKET_POOL_STALLED_MAX_SOCKETS:
case NetLogEventType::SOCKET_POOL_STALLED_MAX_SOCKETS_PER_GROUP:
case NetLogEventType::HTTP2_SESSION_STREAM_STALLED_BY_SESSION_SEND_WINDOW:
case NetLogEventType::HTTP2_SESSION_STREAM_STALLED_BY_STREAM_SEND_WINDOW:
case NetLogEventType::HTTP2_SESSION_STALLED_MAX_STREAMS:
case NetLogEventType::HTTP2_STREAM_FLOW_CONTROL_UNSTALLED:
break;
default:
return;
}
const char* source_type = NetLog::SourceTypeToString(entry.source.type);
const char* event_type = NetLogEventTypeToString(entry.type);
const char* event_phase = NetLog::EventPhaseToString(entry.phase);
base::Value params(entry.ToValue());
std::string params_str;
base::JSONWriter::Write(params, &params_str);
params_str.insert(0, ": ");
VLOG(1) << source_type << "(" << entry.source.id << "): " << event_type
<< ": " << event_phase << params_str;
}
};
} // namespace
namespace {
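// Builds a minimal, cache-less context with an empty ProxyConfig; main() hands
// it to CertNetFetcherURLRequest so certificate fetches are made directly
// rather than through the configured proxy.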
std::unique_ptr<URLRequestContext> BuildCertURLRequestContext(NetLog* net_log) {
URLRequestContextBuilder builder;
builder.DisableHttpCache();
builder.set_net_log(net_log);
ProxyConfig proxy_config;
auto proxy_service =
ConfiguredProxyResolutionService::CreateWithoutProxyResolver(
std::make_unique<ProxyConfigServiceFixed>(
ProxyConfigWithAnnotation(proxy_config, kTrafficAnnotation)),
net_log);
proxy_service->ForceReloadProxyConfig();
builder.set_proxy_resolution_service(std::move(proxy_service));
return builder.Build();
}
// Builds a URLRequestContext assuming there's only a single loop.
std::unique_ptr<URLRequestContext> BuildURLRequestContext(
const Params& params,
scoped_refptr<CertNetFetcherURLRequest> cert_net_fetcher,
NetLog* net_log) {
URLRequestContextBuilder builder;
builder.DisableHttpCache();
builder.set_net_log(net_log);
ProxyConfig proxy_config;
proxy_config.proxy_rules().ParseFromString(params.proxy_url);
LOG(INFO) << "Proxying via " << params.proxy_url;
auto proxy_service =
ConfiguredProxyResolutionService::CreateWithoutProxyResolver(
std::make_unique<ProxyConfigServiceFixed>(
ProxyConfigWithAnnotation(proxy_config, kTrafficAnnotation)),
net_log);
proxy_service->ForceReloadProxyConfig();
builder.set_proxy_resolution_service(std::move(proxy_service));
if (!params.host_resolver_rules.empty()) {
builder.set_host_mapping_rules(params.host_resolver_rules);
}
builder.SetCertVerifier(
CertVerifier::CreateDefault(std::move(cert_net_fetcher)));
builder.set_proxy_delegate(
std::make_unique<NaiveProxyDelegate>(params.extra_headers));
auto context = builder.Build();
if (!params.proxy_url.empty() && !params.proxy_user.empty() &&
!params.proxy_pass.empty()) {
auto* session = context->http_transaction_factory()->GetSession();
auto* auth_cache = session->http_auth_cache();
std::string proxy_url = params.proxy_url;
GURL proxy_gurl(proxy_url);
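    // A quic:// proxy is rewritten to https:// for the auth cache entry,
    // restricted to QUIC RFC v1, and QUIC is forced for that origin.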
if (proxy_url.compare(0, 7, "quic://") == 0) {
proxy_url.replace(0, 4, "https");
proxy_gurl = GURL(proxy_url);
auto* quic = context->quic_context()->params();
quic->supported_versions = {quic::ParsedQuicVersion::RFCv1()};
quic->origins_to_force_quic_on.insert(
net::HostPortPair::FromURL(proxy_gurl));
}
url::SchemeHostPort auth_origin(proxy_gurl);
AuthCredentials credentials(params.proxy_user, params.proxy_pass);
auth_cache->Add(auth_origin, HttpAuth::AUTH_PROXY,
/*realm=*/{}, HttpAuth::AUTH_SCHEME_BASIC, {},
/*challenge=*/"Basic", credentials, /*path=*/"/");
}
return context;
}
} // namespace
} // namespace net
int main(int argc, char* argv[]) {
url::AddStandardScheme("quic",
url::SCHEME_WITH_HOST_PORT_AND_USER_INFORMATION);
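  // Enables PartitionConnectionsByNetworkIsolationKey by default; the second
  // argument (features to disable) is left empty.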
base::FeatureList::InitializeInstance(
"PartitionConnectionsByNetworkIsolationKey", std::string());
base::SingleThreadTaskExecutor io_task_executor(base::MessagePumpType::IO);
base::ThreadPoolInstance::CreateAndStartWithDefaultParams("naive");
base::AtExitManager exit_manager;
#if defined(OS_MACOSX)
base::mac::ScopedNSAutoreleasePool pool;
#endif
base::CommandLine::Init(argc, argv);
CommandLine cmdline;
Params params;
const auto& proc = *base::CommandLine::ForCurrentProcess();
const auto& args = proc.GetArgs();
if (args.empty()) {
if (proc.argv().size() >= 2) {
GetCommandLine(proc, &cmdline);
} else {
auto path = base::FilePath::FromUTF8Unsafe("config.json");
GetCommandLineFromConfig(path, &cmdline);
}
} else {
base::FilePath path(args[0]);
GetCommandLineFromConfig(path, &cmdline);
}
if (!ParseCommandLine(cmdline, &params)) {
return EXIT_FAILURE;
}
net::ClientSocketPoolManager::set_max_sockets_per_pool(
net::HttpNetworkSession::NORMAL_SOCKET_POOL,
kDefaultMaxSocketsPerPool * kExpectedMaxUsers);
net::ClientSocketPoolManager::set_max_sockets_per_proxy_server(
net::HttpNetworkSession::NORMAL_SOCKET_POOL,
kDefaultMaxSocketsPerPool * kExpectedMaxUsers);
net::ClientSocketPoolManager::set_max_sockets_per_group(
net::HttpNetworkSession::NORMAL_SOCKET_POOL,
kDefaultMaxSocketsPerGroup * kExpectedMaxUsers);
CHECK(logging::InitLogging(params.log_settings));
if (!params.ssl_key_path.empty()) {
net::SSLClientSocket::SetSSLKeyLogger(
std::make_unique<net::SSLKeyLoggerImpl>(params.ssl_key_path));
}
  // The declaration order of net_log and printing_log_observer is important:
  // the destructor of PrintingLogObserver removes it from net_log, so net_log
  // must remain valid for the entire lifetime of printing_log_observer.
net::NetLog* net_log = net::NetLog::Get();
std::unique_ptr<net::FileNetLogObserver> observer;
if (!params.net_log_path.empty()) {
observer = net::FileNetLogObserver::CreateUnbounded(
params.net_log_path, net::NetLogCaptureMode::kDefault, GetConstants());
observer->StartObserving(net_log);
}
// Avoids net log overhead if verbose logging is disabled.
std::unique_ptr<net::PrintingLogObserver> printing_log_observer;
if (params.log_settings.logging_dest != logging::LOG_NONE && VLOG_IS_ON(1)) {
printing_log_observer = std::make_unique<net::PrintingLogObserver>();
net_log->AddObserver(printing_log_observer.get(),
net::NetLogCaptureMode::kDefault);
}
auto cert_context = net::BuildCertURLRequestContext(net_log);
scoped_refptr<net::CertNetFetcherURLRequest> cert_net_fetcher;
  // The built-in verifier is supported but not enabled by default on Mac,
  // which falls back to CreateSystemVerifyProc() and drops the net fetcher.
  // defined(OS_MAC) is skipped here for now, until the built-in verifier is
  // enabled by default there.
#if defined(OS_LINUX) || defined(OS_ANDROID)
cert_net_fetcher = base::MakeRefCounted<net::CertNetFetcherURLRequest>();
cert_net_fetcher->SetURLRequestContext(cert_context.get());
#endif
auto context =
net::BuildURLRequestContext(params, std::move(cert_net_fetcher), net_log);
auto* session = context->http_transaction_factory()->GetSession();
auto listen_socket =
std::make_unique<net::TCPServerSocket>(net_log, net::NetLogSource());
int result = listen_socket->ListenWithAddressAndPort(
params.listen_addr, params.listen_port, kListenBackLog);
if (result != net::OK) {
LOG(ERROR) << "Failed to listen: " << result;
return EXIT_FAILURE;
}
LOG(INFO) << "Listening on " << params.listen_addr << ":"
<< params.listen_port;
std::unique_ptr<net::RedirectResolver> resolver;
if (params.protocol == net::ClientProtocol::kRedir) {
auto resolver_socket =
std::make_unique<net::UDPServerSocket>(net_log, net::NetLogSource());
resolver_socket->AllowAddressReuse();
net::IPAddress listen_addr;
if (!listen_addr.AssignFromIPLiteral(params.listen_addr)) {
LOG(ERROR) << "Failed to open resolver: " << net::ERR_ADDRESS_INVALID;
return EXIT_FAILURE;
}
result = resolver_socket->Listen(
net::IPEndPoint(listen_addr, params.listen_port));
if (result != net::OK) {
LOG(ERROR) << "Failed to open resolver: " << result;
return EXIT_FAILURE;
}
resolver = std::make_unique<net::RedirectResolver>(
std::move(resolver_socket), params.resolver_range,
params.resolver_prefix);
}
net::NaiveProxy naive_proxy(std::move(listen_socket), params.protocol,
params.listen_user, params.listen_pass,
params.concurrency, resolver.get(), session,
kTrafficAnnotation);
base::RunLoop().Run();
return EXIT_SUCCESS;
}


@ -0,0 +1,159 @@
// Copyright 2020 klzgrad <kizdiv@gmail.com>. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "net/tools/naive/naive_proxy_delegate.h"
#include <string>
#include "base/logging.h"
#include "base/rand_util.h"
#include "net/base/proxy_string_util.h"
#include "net/http/http_request_headers.h"
#include "net/http/http_response_headers.h"
#include "net/third_party/quiche/src/spdy/core/hpack/hpack_constants.h"
namespace net {
namespace {
bool g_nonindex_codes_initialized;
uint8_t g_nonindex_codes[17];
} // namespace
void InitializeNonindexCodes() {
if (g_nonindex_codes_initialized)
return;
g_nonindex_codes_initialized = true;
unsigned i = 0;
for (const auto& symbol : spdy::HpackHuffmanCodeVector()) {
if (symbol.id >= 0x20 && symbol.id <= 0x7f && symbol.length >= 8) {
g_nonindex_codes[i++] = symbol.id;
if (i >= sizeof(g_nonindex_codes))
break;
}
}
CHECK(i == sizeof(g_nonindex_codes));
}
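// Fills |buf| with printable characters drawn from g_nonindex_codes: the first
// 16 bytes each encode 4 bits of |unique_bits|, and any remaining bytes repeat
// the final code.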
void FillNonindexHeaderValue(uint64_t unique_bits, char* buf, int len) {
DCHECK(g_nonindex_codes_initialized);
int first = len < 16 ? len : 16;
for (int i = 0; i < first; i++) {
buf[i] = g_nonindex_codes[unique_bits & 0b1111];
unique_bits >>= 4;
}
for (int i = first; i < len; i++) {
buf[i] = g_nonindex_codes[16];
}
}
NaiveProxyDelegate::NaiveProxyDelegate(const HttpRequestHeaders& extra_headers)
: extra_headers_(extra_headers) {
InitializeNonindexCodes();
}
NaiveProxyDelegate::~NaiveProxyDelegate() = default;
void NaiveProxyDelegate::OnBeforeTunnelRequest(
const ProxyServer& proxy_server,
HttpRequestHeaders* extra_headers) {
if (proxy_server.is_direct() || proxy_server.is_socks())
return;
// Sends client-side padding header regardless of server support
std::string padding(base::RandInt(16, 32), '~');
FillNonindexHeaderValue(base::RandUint64(), &padding[0], padding.size());
extra_headers->SetHeader("padding", padding);
  // Enables Fast Open in the H2/H3 proxy client socket once the state of
  // server padding support is known.
if (padding_state_by_server_[proxy_server] != PaddingSupport::kUnknown) {
extra_headers->SetHeader("fastopen", "1");
}
extra_headers->MergeFrom(extra_headers_);
}
Error NaiveProxyDelegate::OnTunnelHeadersReceived(
const ProxyServer& proxy_server,
const HttpResponseHeaders& response_headers) {
if (proxy_server.is_direct() || proxy_server.is_socks())
return OK;
// Detects server padding support, even if it changes dynamically.
bool padding = response_headers.HasHeader("padding");
auto new_state =
padding ? PaddingSupport::kCapable : PaddingSupport::kIncapable;
auto& padding_state = padding_state_by_server_[proxy_server];
if (padding_state == PaddingSupport::kUnknown || padding_state != new_state) {
LOG(INFO) << "Padding capability of " << ProxyServerToProxyUri(proxy_server)
<< (padding ? " detected" : " undetected");
}
padding_state = new_state;
return OK;
}
PaddingSupport NaiveProxyDelegate::GetProxyServerPaddingSupport(
const ProxyServer& proxy_server) {
  // Padding capability cannot be detected for direct or SOCKS proxies.
if (proxy_server.is_direct() || proxy_server.is_socks())
return PaddingSupport::kIncapable;
return padding_state_by_server_[proxy_server];
}
PaddingDetectorDelegate::PaddingDetectorDelegate(
NaiveProxyDelegate* naive_proxy_delegate,
const ProxyServer& proxy_server,
ClientProtocol client_protocol)
: naive_proxy_delegate_(naive_proxy_delegate),
proxy_server_(proxy_server),
client_protocol_(client_protocol),
detected_client_padding_support_(PaddingSupport::kUnknown),
cached_server_padding_support_(PaddingSupport::kUnknown) {}
PaddingDetectorDelegate::~PaddingDetectorDelegate() = default;
bool PaddingDetectorDelegate::IsPaddingSupportKnown() {
auto c = GetClientPaddingSupport();
auto s = GetServerPaddingSupport();
return c != PaddingSupport::kUnknown && s != PaddingSupport::kUnknown;
}
Direction PaddingDetectorDelegate::GetPaddingDirection() {
auto c = GetClientPaddingSupport();
auto s = GetServerPaddingSupport();
  // Padding support must already have been detected at this point.
CHECK_NE(c, PaddingSupport::kUnknown);
CHECK_NE(s, PaddingSupport::kUnknown);
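  // The returned Direction is used as the padding direction in
  // NaiveConnection::Push(): e.g. when only the client side is capable, data
  // pulled from the server is padded before being written to the client, and
  // padding is stripped from client data before it reaches the server.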
if (c == PaddingSupport::kCapable && s == PaddingSupport::kIncapable) {
return kServer;
}
if (c == PaddingSupport::kIncapable && s == PaddingSupport::kCapable) {
return kClient;
}
return kNone;
}
void PaddingDetectorDelegate::SetClientPaddingSupport(
PaddingSupport padding_support) {
detected_client_padding_support_ = padding_support;
}
PaddingSupport PaddingDetectorDelegate::GetClientPaddingSupport() {
  // Padding capability cannot be detected for these client protocols.
if (client_protocol_ == ClientProtocol::kSocks5) {
return PaddingSupport::kIncapable;
} else if (client_protocol_ == ClientProtocol::kRedir) {
return PaddingSupport::kIncapable;
}
return detected_client_padding_support_;
}
PaddingSupport PaddingDetectorDelegate::GetServerPaddingSupport() {
if (cached_server_padding_support_ != PaddingSupport::kUnknown)
return cached_server_padding_support_;
cached_server_padding_support_ =
naive_proxy_delegate_->GetProxyServerPaddingSupport(proxy_server_);
return cached_server_padding_support_;
}
} // namespace net


@ -0,0 +1,94 @@
// Copyright 2020 klzgrad <kizdiv@gmail.com>. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef NET_TOOLS_NAIVE_NAIVE_PROXY_DELEGATE_H_
#define NET_TOOLS_NAIVE_NAIVE_PROXY_DELEGATE_H_
#include <cstdint>
#include <map>
#include <string>
#include "base/strings/string_piece.h"
#include "net/base/net_errors.h"
#include "net/base/proxy_delegate.h"
#include "net/base/proxy_server.h"
#include "net/proxy_resolution/proxy_retry_info.h"
#include "net/tools/naive/naive_protocol.h"
#include "url/gurl.h"
namespace net {
void InitializeNonindexCodes();
// |unique_bits| SHOULD have relatively unique values.
void FillNonindexHeaderValue(uint64_t unique_bits, char* buf, int len);
class ProxyInfo;
class HttpRequestHeaders;
class HttpResponseHeaders;
enum class PaddingSupport {
kUnknown = 0,
kCapable,
kIncapable,
};
class NaiveProxyDelegate : public ProxyDelegate {
public:
explicit NaiveProxyDelegate(const HttpRequestHeaders& extra_headers);
~NaiveProxyDelegate() override;
void OnResolveProxy(const GURL& url,
const std::string& method,
const ProxyRetryInfoMap& proxy_retry_info,
ProxyInfo* result) override {}
void OnFallback(const ProxyServer& bad_proxy, int net_error) override {}
  // This only affects the H2 proxy client socket.
void OnBeforeTunnelRequest(const ProxyServer& proxy_server,
HttpRequestHeaders* extra_headers) override;
Error OnTunnelHeadersReceived(
const ProxyServer& proxy_server,
const HttpResponseHeaders& response_headers) override;
PaddingSupport GetProxyServerPaddingSupport(const ProxyServer& proxy_server);
private:
const HttpRequestHeaders& extra_headers_;
std::map<ProxyServer, PaddingSupport> padding_state_by_server_;
};
class ClientPaddingDetectorDelegate {
public:
virtual ~ClientPaddingDetectorDelegate() = default;
virtual void SetClientPaddingSupport(PaddingSupport padding_support) = 0;
};
class PaddingDetectorDelegate : public ClientPaddingDetectorDelegate {
public:
PaddingDetectorDelegate(NaiveProxyDelegate* naive_proxy_delegate,
const ProxyServer& proxy_server,
ClientProtocol client_protocol);
~PaddingDetectorDelegate() override;
bool IsPaddingSupportKnown();
Direction GetPaddingDirection();
void SetClientPaddingSupport(PaddingSupport padding_support) override;
private:
PaddingSupport GetClientPaddingSupport();
PaddingSupport GetServerPaddingSupport();
NaiveProxyDelegate* naive_proxy_delegate_;
const ProxyServer& proxy_server_;
ClientProtocol client_protocol_;
PaddingSupport detected_client_padding_support_;
  // The result is cached only for the duration of one connection, so later
  // connections still pick up changes if the server's padding support changes.
PaddingSupport cached_server_padding_support_;
};
} // namespace net
#endif // NET_TOOLS_NAIVE_NAIVE_PROXY_DELEGATE_H_


@ -0,0 +1,246 @@
// Copyright 2019 klzgrad <kizdiv@gmail.com>. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "net/tools/naive/redirect_resolver.h"
#include <cstring>
#include <iterator>
#include <utility>
#include "base/logging.h"
#include "base/threading/thread_task_runner_handle.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/dns/dns_query.h"
#include "net/dns/dns_response.h"
#include "net/dns/dns_util.h"
#include "net/socket/datagram_server_socket.h"
#include "third_party/abseil-cpp/absl/types/optional.h"
namespace {
constexpr int kUdpReadBufferSize = 1024;
constexpr int kResolutionTtl = 60;
constexpr int kResolutionRecycleTime = 60 * 5;
std::string PackedIPv4ToString(uint32_t addr) {
return net::IPAddress(addr >> 24, addr >> 16, addr >> 8, addr).ToString();
}
} // namespace
namespace net {
Resolution::Resolution() = default;
Resolution::~Resolution() = default;
RedirectResolver::RedirectResolver(std::unique_ptr<DatagramServerSocket> socket,
const IPAddress& range,
size_t prefix)
: socket_(std::move(socket)),
range_(range),
prefix_(prefix),
offset_(0),
buffer_(base::MakeRefCounted<IOBufferWithSize>(kUdpReadBufferSize)) {
DCHECK(socket_);
  // Start reading queries in the next run loop, in case the delegate is not
  // yet ready to receive callbacks.
base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, base::BindOnce(&RedirectResolver::DoRead,
weak_ptr_factory_.GetWeakPtr()));
}
RedirectResolver::~RedirectResolver() = default;
void RedirectResolver::DoRead() {
for (;;) {
int rv = socket_->RecvFrom(
buffer_.get(), kUdpReadBufferSize, &recv_address_,
base::BindOnce(&RedirectResolver::OnRecv, base::Unretained(this)));
if (rv == ERR_IO_PENDING)
return;
rv = HandleReadResult(rv);
if (rv == ERR_IO_PENDING)
return;
if (rv < 0) {
LOG(INFO) << "DoRead: ignoring error " << rv;
}
}
}
void RedirectResolver::OnRecv(int result) {
int rv;
rv = HandleReadResult(result);
if (rv == ERR_IO_PENDING)
return;
if (rv < 0) {
LOG(INFO) << "OnRecv: ignoring error " << result;
}
DoRead();
}
void RedirectResolver::OnSend(int result) {
if (result < 0) {
LOG(INFO) << "OnSend: ignoring error " << result;
}
DoRead();
}
int RedirectResolver::HandleReadResult(int result) {
if (result < 0)
return result;
DnsQuery query(buffer_.get());
if (!query.Parse(result)) {
LOG(INFO) << "Malformed DNS query from " << recv_address_.ToString();
return ERR_INVALID_ARGUMENT;
}
int size;
if (query.qtype() == dns_protocol::kTypeA) {
Resolution res;
auto name_or = DnsDomainToString(query.qname());
if (!name_or) {
LOG(INFO) << "Malformed DNS query from " << recv_address_.ToString();
return ERR_INVALID_ARGUMENT;
}
const auto& name = name_or.value();
auto by_name_lookup = resolution_by_name_.emplace(name, resolutions_.end());
auto by_name = by_name_lookup.first;
bool has_name = !by_name_lookup.second;
if (has_name) {
auto res_it = by_name->second;
auto by_addr = res_it->by_addr;
uint32_t addr = res_it->addr;
resolutions_.erase(res_it);
resolutions_.emplace_back();
res_it = std::prev(resolutions_.end());
by_name->second = res_it;
by_addr->second = res_it;
res_it->addr = addr;
res_it->name = name;
res_it->time = base::TimeTicks::Now();
res_it->by_name = by_name;
res_it->by_addr = by_addr;
} else {
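      // Allocate the next address in the configured range: keep the network
      // bits of |range_| and advance |offset_| cyclically through the host
      // bits.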
uint32_t addr = (range_.bytes()[0] << 24) | (range_.bytes()[1] << 16) |
(range_.bytes()[2] << 8) | range_.bytes()[3];
uint32_t subnet = ~0U >> prefix_;
addr &= ~subnet;
addr += offset_;
offset_ = (offset_ + 1) & subnet;
auto by_addr_lookup =
resolution_by_addr_.emplace(addr, resolutions_.end());
auto by_addr = by_addr_lookup.first;
bool has_addr = !by_addr_lookup.second;
if (has_addr) {
        // Too few available addresses; overwrite the existing mapping for
        // this address.
auto res_it = by_addr->second;
LOG(INFO) << "Overwrite " << res_it->name << " "
<< PackedIPv4ToString(res_it->addr) << " with " << name << " "
<< PackedIPv4ToString(addr);
resolution_by_name_.erase(res_it->by_name);
resolutions_.erase(res_it);
resolutions_.emplace_back();
res_it = std::prev(resolutions_.end());
by_name->second = res_it;
by_addr->second = res_it;
res_it->addr = addr;
res_it->name = name;
res_it->time = base::TimeTicks::Now();
res_it->by_name = by_name;
res_it->by_addr = by_addr;
} else {
LOG(INFO) << "Add " << name << " " << PackedIPv4ToString(addr);
resolutions_.emplace_back();
auto res_it = std::prev(resolutions_.end());
by_name->second = res_it;
by_addr->second = res_it;
res_it->addr = addr;
res_it->name = name;
res_it->time = base::TimeTicks::Now();
res_it->by_name = by_name;
res_it->by_addr = by_addr;
        // Garbage-collect resolutions older than kResolutionRecycleTime.
auto now = base::TimeTicks::Now();
for (auto it = resolutions_.begin();
it != resolutions_.end() &&
(now - it->time).InSeconds() > kResolutionRecycleTime;) {
auto next = std::next(it);
LOG(INFO) << "Drop " << it->name << " "
<< PackedIPv4ToString(it->addr);
resolution_by_name_.erase(it->by_name);
resolution_by_addr_.erase(it->by_addr);
resolutions_.erase(it);
it = next;
}
}
}
DnsResourceRecord record;
record.name = name;
record.type = dns_protocol::kTypeA;
record.klass = dns_protocol::kClassIN;
record.ttl = kResolutionTtl;
uint32_t addr = by_name->second->addr;
record.SetOwnedRdata(IPAddressToPackedString(
IPAddress(addr >> 24, addr >> 16, addr >> 8, addr)));
absl::optional<DnsQuery> query_opt;
query_opt.emplace(query.id(), query.qname(), query.qtype());
DnsResponse response(query.id(), /*is_authoritative=*/false,
/*answers=*/{std::move(record)},
/*authority_records=*/{}, /*additional_records=*/{},
query_opt);
size = response.io_buffer_size();
if (size > buffer_->size() || !response.io_buffer()) {
return ERR_NO_BUFFER_SPACE;
}
std::memcpy(buffer_->data(), response.io_buffer()->data(), size);
} else {
absl::optional<DnsQuery> query_opt;
query_opt.emplace(query.id(), query.qname(), query.qtype());
DnsResponse response(query.id(), /*is_authoritative=*/false, /*answers=*/{},
/*authority_records=*/{}, /*additional_records=*/{},
query_opt, dns_protocol::kRcodeSERVFAIL);
size = response.io_buffer_size();
if (size > buffer_->size() || !response.io_buffer()) {
return ERR_NO_BUFFER_SPACE;
}
std::memcpy(buffer_->data(), response.io_buffer()->data(), size);
}
return socket_->SendTo(
buffer_.get(), size, recv_address_,
base::BindOnce(&RedirectResolver::OnSend, base::Unretained(this)));
}
bool RedirectResolver::IsInResolvedRange(const IPAddress& address) const {
if (!address.IsIPv4())
return false;
return IPAddressMatchesPrefix(address, range_, prefix_);
}
std::string RedirectResolver::FindNameByAddress(
const IPAddress& address) const {
if (!address.IsIPv4())
return {};
uint32_t addr = (address.bytes()[0] << 24) | (address.bytes()[1] << 16) |
(address.bytes()[2] << 8) | address.bytes()[3];
auto by_addr = resolution_by_addr_.find(addr);
if (by_addr == resolution_by_addr_.end())
return {};
return by_addr->second->name;
}
} // namespace net


@ -0,0 +1,69 @@
// Copyright 2019 klzgrad <kizdiv@gmail.com>. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef NET_TOOLS_NAIVE_REDIRECT_RESOLVER_H_
#define NET_TOOLS_NAIVE_REDIRECT_RESOLVER_H_
#include <cstdint>
#include <list>
#include <map>
#include <memory>
#include <string>
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "base/time/time.h"
#include "net/base/ip_address.h"
#include "net/base/ip_endpoint.h"
namespace net {
class DatagramServerSocket;
class IOBufferWithSize;
struct Resolution {
Resolution();
~Resolution();
uint32_t addr;
std::string name;
base::TimeTicks time;
std::map<std::string, std::list<Resolution>::iterator>::iterator by_name;
std::map<uint32_t, std::list<Resolution>::iterator>::iterator by_addr;
};
class RedirectResolver {
public:
RedirectResolver(std::unique_ptr<DatagramServerSocket> socket,
const IPAddress& range,
size_t prefix);
~RedirectResolver();
RedirectResolver(const RedirectResolver&) = delete;
RedirectResolver& operator=(const RedirectResolver&) = delete;
bool IsInResolvedRange(const IPAddress& address) const;
std::string FindNameByAddress(const IPAddress& address) const;
private:
void DoRead();
void OnRecv(int result);
void OnSend(int result);
int HandleReadResult(int result);
std::unique_ptr<DatagramServerSocket> socket_;
IPAddress range_;
size_t prefix_;
uint32_t offset_;
scoped_refptr<IOBufferWithSize> buffer_;
IPEndPoint recv_address_;
std::map<std::string, std::list<Resolution>::iterator> resolution_by_name_;
std::map<uint32_t, std::list<Resolution>::iterator> resolution_by_addr_;
std::list<Resolution> resolutions_;
base::WeakPtrFactory<RedirectResolver> weak_ptr_factory_{this};
};
} // namespace net
#endif // NET_TOOLS_NAIVE_REDIRECT_RESOLVER_H_


@ -0,0 +1,673 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Copyright 2018 klzgrad <kizdiv@gmail.com>. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "net/tools/naive/socks5_server_socket.h"
#include <cstring>
#include <utility>
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/cxx17_backports.h"
#include "base/logging.h"
#include "base/sys_byteorder.h"
#include "net/base/ip_address.h"
#include "net/base/net_errors.h"
#include "net/base/sys_addrinfo.h"
#include "net/log/net_log.h"
#include "net/log/net_log_event_type.h"
namespace net {
enum SocksCommandType {
kCommandConnect = 0x01,
kCommandBind = 0x02,
kCommandUDPAssociate = 0x03,
};
static constexpr unsigned int kGreetReadHeaderSize = 2;
static constexpr unsigned int kAuthReadHeaderSize = 2;
static constexpr unsigned int kReadHeaderSize = 5;
static constexpr char kSOCKS5Version = '\x05';
static constexpr char kSOCKS5Reserved = '\x00';
static constexpr char kAuthMethodNone = '\x00';
static constexpr char kAuthMethodUserPass = '\x02';
static constexpr char kAuthMethodNoAcceptable = '\xff';
static constexpr char kSubnegotiationVersion = '\x01';
static constexpr char kAuthStatusSuccess = '\x00';
static constexpr char kAuthStatusFailure = '\xff';
static constexpr char kReplySuccess = '\x00';
static constexpr char kReplyCommandNotSupported = '\x07';
static_assert(sizeof(struct in_addr) == 4, "incorrect system size of IPv4");
static_assert(sizeof(struct in6_addr) == 16, "incorrect system size of IPv6");
Socks5ServerSocket::Socks5ServerSocket(
std::unique_ptr<StreamSocket> transport_socket,
const std::string& user,
const std::string& pass,
const NetworkTrafficAnnotationTag& traffic_annotation)
: io_callback_(base::BindRepeating(&Socks5ServerSocket::OnIOComplete,
base::Unretained(this))),
transport_(std::move(transport_socket)),
next_state_(STATE_NONE),
completed_handshake_(false),
bytes_sent_(0),
was_ever_used_(false),
user_(user),
pass_(pass),
net_log_(transport_->NetLog()),
traffic_annotation_(traffic_annotation) {}
Socks5ServerSocket::~Socks5ServerSocket() {
Disconnect();
}
const HostPortPair& Socks5ServerSocket::request_endpoint() const {
return request_endpoint_;
}
int Socks5ServerSocket::Connect(CompletionOnceCallback callback) {
DCHECK(transport_);
DCHECK_EQ(STATE_NONE, next_state_);
DCHECK(!user_callback_);
// If already connected, then just return OK.
if (completed_handshake_)
return OK;
net_log_.BeginEvent(NetLogEventType::SOCKS5_CONNECT);
next_state_ = STATE_GREET_READ;
buffer_.clear();
int rv = DoLoop(OK);
if (rv == ERR_IO_PENDING) {
user_callback_ = std::move(callback);
} else {
net_log_.EndEventWithNetErrorCode(NetLogEventType::SOCKS5_CONNECT, rv);
}
return rv;
}
void Socks5ServerSocket::Disconnect() {
completed_handshake_ = false;
transport_->Disconnect();
// Reset other states to make sure they aren't mistakenly used later.
// These are the states initialized by Connect().
next_state_ = STATE_NONE;
user_callback_.Reset();
}
bool Socks5ServerSocket::IsConnected() const {
return completed_handshake_ && transport_->IsConnected();
}
bool Socks5ServerSocket::IsConnectedAndIdle() const {
return completed_handshake_ && transport_->IsConnectedAndIdle();
}
const NetLogWithSource& Socks5ServerSocket::NetLog() const {
return net_log_;
}
bool Socks5ServerSocket::WasEverUsed() const {
return was_ever_used_;
}
bool Socks5ServerSocket::WasAlpnNegotiated() const {
if (transport_) {
return transport_->WasAlpnNegotiated();
}
NOTREACHED();
return false;
}
NextProto Socks5ServerSocket::GetNegotiatedProtocol() const {
if (transport_) {
return transport_->GetNegotiatedProtocol();
}
NOTREACHED();
return kProtoUnknown;
}
bool Socks5ServerSocket::GetSSLInfo(SSLInfo* ssl_info) {
if (transport_) {
return transport_->GetSSLInfo(ssl_info);
}
NOTREACHED();
return false;
}
void Socks5ServerSocket::GetConnectionAttempts(ConnectionAttempts* out) const {
out->clear();
}
int64_t Socks5ServerSocket::GetTotalReceivedBytes() const {
return transport_->GetTotalReceivedBytes();
}
void Socks5ServerSocket::ApplySocketTag(const SocketTag& tag) {
return transport_->ApplySocketTag(tag);
}
// Read is called by the layer above to read data from this socket. This can
// only be done once the SOCKS handshake is complete.
int Socks5ServerSocket::Read(IOBuffer* buf,
int buf_len,
CompletionOnceCallback callback) {
DCHECK(completed_handshake_);
DCHECK_EQ(STATE_NONE, next_state_);
DCHECK(!user_callback_);
DCHECK(callback);
int rv = transport_->Read(
buf, buf_len,
base::BindOnce(&Socks5ServerSocket::OnReadWriteComplete,
base::Unretained(this), std::move(callback)));
if (rv > 0)
was_ever_used_ = true;
return rv;
}
// Write is called by the layer above to write data to the client. This can
// only be done once the SOCKS handshake has completed.
int Socks5ServerSocket::Write(
IOBuffer* buf,
int buf_len,
CompletionOnceCallback callback,
const NetworkTrafficAnnotationTag& traffic_annotation) {
DCHECK(completed_handshake_);
DCHECK_EQ(STATE_NONE, next_state_);
DCHECK(!user_callback_);
DCHECK(callback);
int rv = transport_->Write(
buf, buf_len,
base::BindOnce(&Socks5ServerSocket::OnReadWriteComplete,
base::Unretained(this), std::move(callback)),
traffic_annotation);
if (rv > 0)
was_ever_used_ = true;
return rv;
}
int Socks5ServerSocket::SetReceiveBufferSize(int32_t size) {
return transport_->SetReceiveBufferSize(size);
}
int Socks5ServerSocket::SetSendBufferSize(int32_t size) {
return transport_->SetSendBufferSize(size);
}
void Socks5ServerSocket::DoCallback(int result) {
DCHECK_NE(ERR_IO_PENDING, result);
DCHECK(user_callback_);
// Since Run() may result in Read being called,
// clear user_callback_ up front.
std::move(user_callback_).Run(result);
}
void Socks5ServerSocket::OnIOComplete(int result) {
DCHECK_NE(STATE_NONE, next_state_);
int rv = DoLoop(result);
if (rv != ERR_IO_PENDING) {
net_log_.EndEvent(NetLogEventType::SOCKS5_CONNECT);
DoCallback(rv);
}
}
void Socks5ServerSocket::OnReadWriteComplete(CompletionOnceCallback callback,
int result) {
DCHECK_NE(ERR_IO_PENDING, result);
DCHECK(callback);
if (result > 0)
was_ever_used_ = true;
std::move(callback).Run(result);
}
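// Advances the handshake state machine until it completes, fails, or blocks
// on I/O (ERR_IO_PENDING), in which case OnIOComplete() resumes it later.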
int Socks5ServerSocket::DoLoop(int last_io_result) {
DCHECK_NE(next_state_, STATE_NONE);
int rv = last_io_result;
do {
State state = next_state_;
next_state_ = STATE_NONE;
switch (state) {
case STATE_GREET_READ:
DCHECK_EQ(OK, rv);
net_log_.BeginEvent(NetLogEventType::SOCKS5_GREET_READ);
rv = DoGreetRead();
break;
case STATE_GREET_READ_COMPLETE:
rv = DoGreetReadComplete(rv);
net_log_.EndEventWithNetErrorCode(NetLogEventType::SOCKS5_GREET_READ,
rv);
break;
case STATE_GREET_WRITE:
DCHECK_EQ(OK, rv);
net_log_.BeginEvent(NetLogEventType::SOCKS5_GREET_WRITE);
rv = DoGreetWrite();
break;
case STATE_GREET_WRITE_COMPLETE:
rv = DoGreetWriteComplete(rv);
net_log_.EndEventWithNetErrorCode(NetLogEventType::SOCKS5_GREET_WRITE,
rv);
break;
case STATE_AUTH_READ:
DCHECK_EQ(OK, rv);
rv = DoAuthRead();
break;
case STATE_AUTH_READ_COMPLETE:
rv = DoAuthReadComplete(rv);
break;
case STATE_AUTH_WRITE:
DCHECK_EQ(OK, rv);
rv = DoAuthWrite();
break;
case STATE_AUTH_WRITE_COMPLETE:
rv = DoAuthWriteComplete(rv);
break;
case STATE_HANDSHAKE_READ:
DCHECK_EQ(OK, rv);
net_log_.BeginEvent(NetLogEventType::SOCKS5_HANDSHAKE_READ);
rv = DoHandshakeRead();
break;
case STATE_HANDSHAKE_READ_COMPLETE:
rv = DoHandshakeReadComplete(rv);
net_log_.EndEventWithNetErrorCode(
NetLogEventType::SOCKS5_HANDSHAKE_READ, rv);
break;
case STATE_HANDSHAKE_WRITE:
DCHECK_EQ(OK, rv);
net_log_.BeginEvent(NetLogEventType::SOCKS5_HANDSHAKE_WRITE);
rv = DoHandshakeWrite();
break;
case STATE_HANDSHAKE_WRITE_COMPLETE:
rv = DoHandshakeWriteComplete(rv);
net_log_.EndEventWithNetErrorCode(
NetLogEventType::SOCKS5_HANDSHAKE_WRITE, rv);
break;
default:
NOTREACHED() << "bad state";
rv = ERR_UNEXPECTED;
break;
}
} while (rv != ERR_IO_PENDING && next_state_ != STATE_NONE);
return rv;
}
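// Reads the client greeting as defined in RFC 1928:
//   +----+----------+----------+
//   |VER | NMETHODS | METHODS  |
//   +----+----------+----------+
//   | 1  |    1     | 1 to 255 |
//   +----+----------+----------+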
int Socks5ServerSocket::DoGreetRead() {
next_state_ = STATE_GREET_READ_COMPLETE;
if (buffer_.empty()) {
read_header_size_ = kGreetReadHeaderSize;
}
int handshake_buf_len = read_header_size_ - buffer_.size();
DCHECK_LT(0, handshake_buf_len);
handshake_buf_ = base::MakeRefCounted<IOBuffer>(handshake_buf_len);
return transport_->Read(handshake_buf_.get(), handshake_buf_len,
io_callback_);
}
int Socks5ServerSocket::DoGreetReadComplete(int result) {
if (result < 0)
return result;
if (result == 0) {
net_log_.AddEvent(
NetLogEventType::SOCKS_UNEXPECTEDLY_CLOSED_DURING_GREETING);
return ERR_SOCKS_CONNECTION_FAILED;
}
buffer_.append(handshake_buf_->data(), result);
  // Once the fixed-size greeting header has been read, work out how many
  // more bytes are required and grow the expected read size accordingly.
if (buffer_.size() == kGreetReadHeaderSize) {
if (buffer_[0] != kSOCKS5Version) {
net_log_.AddEventWithIntParams(NetLogEventType::SOCKS_UNEXPECTED_VERSION,
"version", buffer_[0]);
return ERR_SOCKS_CONNECTION_FAILED;
}
int nmethods = buffer_[1];
if (nmethods == 0) {
net_log_.AddEvent(NetLogEventType::SOCKS_NO_REQUESTED_AUTH);
return ERR_SOCKS_CONNECTION_FAILED;
}
read_header_size_ += nmethods;
next_state_ = STATE_GREET_READ;
return OK;
}
if (buffer_.size() == read_header_size_) {
int nmethods = buffer_[1];
char expected_method = kAuthMethodNone;
if (!user_.empty() || !pass_.empty()) {
expected_method = kAuthMethodUserPass;
}
void* match =
std::memchr(&buffer_[kGreetReadHeaderSize], expected_method, nmethods);
if (match) {
auth_method_ = expected_method;
} else {
auth_method_ = kAuthMethodNoAcceptable;
}
buffer_.clear();
next_state_ = STATE_GREET_WRITE;
return OK;
}
next_state_ = STATE_GREET_READ;
return OK;
}
int Socks5ServerSocket::DoGreetWrite() {
if (buffer_.empty()) {
const char write_data[] = {kSOCKS5Version, auth_method_};
buffer_ = std::string(write_data, std::size(write_data));
bytes_sent_ = 0;
}
next_state_ = STATE_GREET_WRITE_COMPLETE;
int handshake_buf_len = buffer_.size() - bytes_sent_;
DCHECK_LT(0, handshake_buf_len);
handshake_buf_ = base::MakeRefCounted<IOBuffer>(handshake_buf_len);
std::memcpy(handshake_buf_->data(), &buffer_.data()[bytes_sent_],
handshake_buf_len);
return transport_->Write(handshake_buf_.get(), handshake_buf_len,
io_callback_, traffic_annotation_);
}
int Socks5ServerSocket::DoGreetWriteComplete(int result) {
if (result < 0)
return result;
bytes_sent_ += result;
if (bytes_sent_ == buffer_.size()) {
buffer_.clear();
if (auth_method_ == kAuthMethodNone) {
next_state_ = STATE_HANDSHAKE_READ;
} else if (auth_method_ == kAuthMethodUserPass) {
next_state_ = STATE_AUTH_READ;
} else {
net_log_.AddEvent(NetLogEventType::SOCKS_NO_ACCEPTABLE_AUTH);
return ERR_SOCKS_CONNECTION_FAILED;
}
} else {
next_state_ = STATE_GREET_WRITE;
}
return OK;
}
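// Reads the username/password subnegotiation request defined in RFC 1929
// (this implementation additionally accepts an empty PASSWD):
//   +----+------+----------+------+----------+
//   |VER | ULEN |  UNAME   | PLEN |  PASSWD  |
//   +----+------+----------+------+----------+
//   | 1  |  1   | 1 to 255 |  1   | 1 to 255 |
//   +----+------+----------+------+----------+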
int Socks5ServerSocket::DoAuthRead() {
next_state_ = STATE_AUTH_READ_COMPLETE;
if (buffer_.empty()) {
read_header_size_ = kAuthReadHeaderSize;
}
int handshake_buf_len = read_header_size_ - buffer_.size();
DCHECK_LT(0, handshake_buf_len);
handshake_buf_ = base::MakeRefCounted<IOBuffer>(handshake_buf_len);
return transport_->Read(handshake_buf_.get(), handshake_buf_len,
io_callback_);
}
int Socks5ServerSocket::DoAuthReadComplete(int result) {
if (result < 0)
return result;
if (result == 0) {
return ERR_SOCKS_CONNECTION_FAILED;
}
buffer_.append(handshake_buf_->data(), result);
  // Once the fixed-size subnegotiation header has been read, work out how
  // many more bytes are required and grow the expected read size accordingly.
if (buffer_.size() == kAuthReadHeaderSize) {
if (buffer_[0] != kSubnegotiationVersion) {
net_log_.AddEventWithIntParams(NetLogEventType::SOCKS_UNEXPECTED_VERSION,
"version", buffer_[0]);
return ERR_SOCKS_CONNECTION_FAILED;
}
int username_len = buffer_[1];
read_header_size_ += username_len + 1;
next_state_ = STATE_AUTH_READ;
return OK;
}
if (buffer_.size() == read_header_size_) {
int username_len = buffer_[1];
int password_len = buffer_[kAuthReadHeaderSize + username_len];
size_t password_offset = kAuthReadHeaderSize + username_len + 1;
if (buffer_.size() == password_offset && password_len != 0) {
read_header_size_ += password_len;
next_state_ = STATE_AUTH_READ;
return OK;
}
if (buffer_.compare(kAuthReadHeaderSize, username_len, user_) == 0 &&
buffer_.compare(password_offset, password_len, pass_) == 0) {
auth_status_ = kAuthStatusSuccess;
} else {
auth_status_ = kAuthStatusFailure;
}
buffer_.clear();
next_state_ = STATE_AUTH_WRITE;
return OK;
}
next_state_ = STATE_AUTH_READ;
return OK;
}
int Socks5ServerSocket::DoAuthWrite() {
if (buffer_.empty()) {
const char write_data[] = {kSubnegotiationVersion, auth_status_};
buffer_ = std::string(write_data, std::size(write_data));
bytes_sent_ = 0;
}
next_state_ = STATE_AUTH_WRITE_COMPLETE;
int handshake_buf_len = buffer_.size() - bytes_sent_;
DCHECK_LT(0, handshake_buf_len);
handshake_buf_ = base::MakeRefCounted<IOBuffer>(handshake_buf_len);
std::memcpy(handshake_buf_->data(), &buffer_.data()[bytes_sent_],
handshake_buf_len);
return transport_->Write(handshake_buf_.get(), handshake_buf_len,
io_callback_, traffic_annotation_);
}
int Socks5ServerSocket::DoAuthWriteComplete(int result) {
if (result < 0)
return result;
bytes_sent_ += result;
if (bytes_sent_ == buffer_.size()) {
buffer_.clear();
if (auth_status_ == kAuthStatusSuccess) {
next_state_ = STATE_HANDSHAKE_READ;
} else {
return ERR_SOCKS_CONNECTION_FAILED;
}
} else {
next_state_ = STATE_AUTH_WRITE;
}
return OK;
}
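// Reads the client's request as defined in RFC 1928:
//   +----+-----+-------+------+----------+----------+
//   |VER | CMD |  RSV  | ATYP | DST.ADDR | DST.PORT |
//   +----+-----+-------+------+----------+----------+
//   | 1  |  1  | X'00' |  1   | Variable |    2     |
//   +----+-----+-------+------+----------+----------+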
int Socks5ServerSocket::DoHandshakeRead() {
next_state_ = STATE_HANDSHAKE_READ_COMPLETE;
if (buffer_.empty()) {
read_header_size_ = kReadHeaderSize;
}
int handshake_buf_len = read_header_size_ - buffer_.size();
DCHECK_LT(0, handshake_buf_len);
handshake_buf_ = base::MakeRefCounted<IOBuffer>(handshake_buf_len);
return transport_->Read(handshake_buf_.get(), handshake_buf_len,
io_callback_);
}
int Socks5ServerSocket::DoHandshakeReadComplete(int result) {
if (result < 0)
return result;
// The underlying socket closed unexpectedly.
if (result == 0) {
net_log_.AddEvent(
NetLogEventType::SOCKS_UNEXPECTEDLY_CLOSED_DURING_HANDSHAKE);
return ERR_SOCKS_CONNECTION_FAILED;
}
buffer_.append(handshake_buf_->data(), result);
  // Once the fixed-size request header has been read, work out how many more
  // bytes are required and grow the expected read size accordingly.
if (buffer_.size() == kReadHeaderSize) {
if (buffer_[0] != kSOCKS5Version || buffer_[2] != kSOCKS5Reserved) {
net_log_.AddEventWithIntParams(NetLogEventType::SOCKS_UNEXPECTED_VERSION,
"version", buffer_[0]);
return ERR_SOCKS_CONNECTION_FAILED;
}
SocksCommandType command = static_cast<SocksCommandType>(buffer_[1]);
if (command == kCommandConnect) {
// The proxy replies with success immediately without first connecting
// to the requested endpoint.
reply_ = kReplySuccess;
} else if (command == kCommandBind || command == kCommandUDPAssociate) {
reply_ = kReplyCommandNotSupported;
} else {
      net_log_.AddEventWithIntParams(NetLogEventType::SOCKS_UNEXPECTED_COMMAND,
                                     "command", buffer_[1]);
return ERR_SOCKS_CONNECTION_FAILED;
}
    // Check the address type in the client's request and grow the expected
    // request size accordingly. The fixed-size header already includes one
    // byte past the address type: for a domain address that byte carries the
    // domain length, while for IPv4/IPv6 it is already part of the address,
    // so one byte is subtracted from the additional size to read.
address_type_ = static_cast<SocksEndPointAddressType>(buffer_[3]);
if (address_type_ == kEndPointDomain) {
address_size_ = static_cast<uint8_t>(buffer_[4]);
if (address_size_ == 0) {
net_log_.AddEvent(NetLogEventType::SOCKS_ZERO_LENGTH_DOMAIN);
return ERR_SOCKS_CONNECTION_FAILED;
}
} else if (address_type_ == kEndPointResolvedIPv4) {
address_size_ = sizeof(struct in_addr);
--read_header_size_;
} else if (address_type_ == kEndPointResolvedIPv6) {
address_size_ = sizeof(struct in6_addr);
--read_header_size_;
} else {
// Aborts connection on unspecified address type.
net_log_.AddEventWithIntParams(
NetLogEventType::SOCKS_UNKNOWN_ADDRESS_TYPE, "address_type",
buffer_[3]);
return ERR_SOCKS_CONNECTION_FAILED;
}
read_header_size_ += address_size_ + sizeof(uint16_t);
next_state_ = STATE_HANDSHAKE_READ;
return OK;
}
// When the final bytes are read, setup handshake.
if (buffer_.size() == read_header_size_) {
size_t port_start = read_header_size_ - sizeof(uint16_t);
uint16_t port_net;
std::memcpy(&port_net, &buffer_[port_start], sizeof(uint16_t));
uint16_t port_host = base::NetToHost16(port_net);
size_t address_start = port_start - address_size_;
if (address_type_ == kEndPointDomain) {
std::string domain(&buffer_[address_start], address_size_);
request_endpoint_ = HostPortPair(domain, port_host);
} else {
IPAddress ip_addr(
reinterpret_cast<const uint8_t*>(&buffer_[address_start]),
address_size_);
IPEndPoint endpoint(ip_addr, port_host);
request_endpoint_ = HostPortPair::FromIPEndPoint(endpoint);
}
buffer_.clear();
next_state_ = STATE_HANDSHAKE_WRITE;
return OK;
}
next_state_ = STATE_HANDSHAKE_READ;
return OK;
}
// Writes the SOCKS handshake data to the underlying socket connection.
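// The reply format is defined in RFC 1928; this implementation always sends
// a zeroed IPv4 BND.ADDR/BND.PORT:
//   +----+-----+-------+------+----------+----------+
//   |VER | REP |  RSV  | ATYP | BND.ADDR | BND.PORT |
//   +----+-----+-------+------+----------+----------+
//   | 1  |  1  | X'00' |  1   | Variable |    2     |
//   +----+-----+-------+------+----------+----------+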
int Socks5ServerSocket::DoHandshakeWrite() {
next_state_ = STATE_HANDSHAKE_WRITE_COMPLETE;
if (buffer_.empty()) {
const char write_data[] = {
// clang-format off
kSOCKS5Version,
reply_,
kSOCKS5Reserved,
kEndPointResolvedIPv4,
0x00, 0x00, 0x00, 0x00, // BND.ADDR
0x00, 0x00, // BND.PORT
// clang-format on
};
buffer_ = std::string(write_data, std::size(write_data));
bytes_sent_ = 0;
}
int handshake_buf_len = buffer_.size() - bytes_sent_;
DCHECK_LT(0, handshake_buf_len);
handshake_buf_ = base::MakeRefCounted<IOBuffer>(handshake_buf_len);
std::memcpy(handshake_buf_->data(), &buffer_[bytes_sent_], handshake_buf_len);
return transport_->Write(handshake_buf_.get(), handshake_buf_len,
io_callback_, traffic_annotation_);
}
int Socks5ServerSocket::DoHandshakeWriteComplete(int result) {
if (result < 0)
return result;
// We ignore the case when result is 0, since the underlying Write
// may return spurious writes while waiting on the socket.
bytes_sent_ += result;
if (bytes_sent_ == buffer_.size()) {
buffer_.clear();
if (reply_ == kReplySuccess) {
completed_handshake_ = true;
next_state_ = STATE_NONE;
} else {
net_log_.AddEventWithIntParams(NetLogEventType::SOCKS_SERVER_ERROR,
"error_code", reply_);
return ERR_SOCKS_CONNECTION_FAILED;
}
} else {
next_state_ = STATE_HANDSHAKE_WRITE;
}
return OK;
}
int Socks5ServerSocket::GetPeerAddress(IPEndPoint* address) const {
return transport_->GetPeerAddress(address);
}
int Socks5ServerSocket::GetLocalAddress(IPEndPoint* address) const {
return transport_->GetLocalAddress(address);
}
} // namespace net

net/tools/naive/socks5_server_socket.h Normal file

@ -0,0 +1,169 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Copyright 2018 klzgrad <kizdiv@gmail.com>. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef NET_TOOLS_NAIVE_SOCKS5_SERVER_SOCKET_H_
#define NET_TOOLS_NAIVE_SOCKS5_SERVER_SOCKET_H_
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include "base/memory/scoped_refptr.h"
#include "net/base/completion_once_callback.h"
#include "net/base/completion_repeating_callback.h"
#include "net/base/host_port_pair.h"
#include "net/base/io_buffer.h"
#include "net/base/ip_endpoint.h"
#include "net/log/net_log_with_source.h"
#include "net/socket/connection_attempts.h"
#include "net/socket/next_proto.h"
#include "net/socket/stream_socket.h"
#include "net/ssl/ssl_info.h"
namespace net {
struct NetworkTrafficAnnotationTag;
// This StreamSocket is used to set up a SOCKSv5 handshake with a SOCKS
// client. The no-authentication method and username/password authentication
// (RFC 1929) are supported.
class Socks5ServerSocket : public StreamSocket {
public:
Socks5ServerSocket(std::unique_ptr<StreamSocket> transport_socket,
const std::string& user,
const std::string& pass,
const NetworkTrafficAnnotationTag& traffic_annotation);
// On destruction Disconnect() is called.
~Socks5ServerSocket() override;
Socks5ServerSocket(const Socks5ServerSocket&) = delete;
Socks5ServerSocket& operator=(const Socks5ServerSocket&) = delete;
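  // Returns the destination requested by the client. Only valid after
  // Connect() has completed successfully.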
const HostPortPair& request_endpoint() const;
// StreamSocket implementation.
// Does the SOCKS handshake and completes the protocol.
int Connect(CompletionOnceCallback callback) override;
void Disconnect() override;
bool IsConnected() const override;
bool IsConnectedAndIdle() const override;
const NetLogWithSource& NetLog() const override;
bool WasEverUsed() const override;
bool WasAlpnNegotiated() const override;
NextProto GetNegotiatedProtocol() const override;
bool GetSSLInfo(SSLInfo* ssl_info) override;
void GetConnectionAttempts(ConnectionAttempts* out) const override;
void ClearConnectionAttempts() override {}
void AddConnectionAttempts(const ConnectionAttempts& attempts) override {}
int64_t GetTotalReceivedBytes() const override;
void ApplySocketTag(const SocketTag& tag) override;
// Socket implementation.
int Read(IOBuffer* buf,
int buf_len,
CompletionOnceCallback callback) override;
int Write(IOBuffer* buf,
int buf_len,
CompletionOnceCallback callback,
const NetworkTrafficAnnotationTag& traffic_annotation) override;
int SetReceiveBufferSize(int32_t size) override;
int SetSendBufferSize(int32_t size) override;
int GetPeerAddress(IPEndPoint* address) const override;
int GetLocalAddress(IPEndPoint* address) const override;
private:
enum State {
STATE_GREET_READ,
STATE_GREET_READ_COMPLETE,
STATE_GREET_WRITE,
STATE_GREET_WRITE_COMPLETE,
STATE_AUTH_READ,
STATE_AUTH_READ_COMPLETE,
STATE_AUTH_WRITE,
STATE_AUTH_WRITE_COMPLETE,
STATE_HANDSHAKE_WRITE,
STATE_HANDSHAKE_WRITE_COMPLETE,
STATE_HANDSHAKE_READ,
STATE_HANDSHAKE_READ_COMPLETE,
STATE_NONE,
};
// Addressing type that can be specified in requests or responses.
enum SocksEndPointAddressType {
kEndPointDomain = 0x03,
kEndPointResolvedIPv4 = 0x01,
kEndPointResolvedIPv6 = 0x04,
};
void DoCallback(int result);
void OnIOComplete(int result);
void OnReadWriteComplete(CompletionOnceCallback callback, int result);
int DoLoop(int last_io_result);
int DoGreetRead();
int DoGreetReadComplete(int result);
int DoGreetWrite();
int DoGreetWriteComplete(int result);
int DoAuthRead();
int DoAuthReadComplete(int result);
int DoAuthWrite();
int DoAuthWriteComplete(int result);
int DoHandshakeRead();
int DoHandshakeReadComplete(int result);
int DoHandshakeWrite();
int DoHandshakeWriteComplete(int result);
CompletionRepeatingCallback io_callback_;
// Stores the underlying socket.
std::unique_ptr<StreamSocket> transport_;
State next_state_;
// Stores the callback to the layer above, called on completing Connect().
CompletionOnceCallback user_callback_;
// This IOBuffer is used by the class to read and write
// SOCKS handshake data. The length contains the expected size to
// read or write.
scoped_refptr<IOBuffer> handshake_buf_;
// While writing, this buffer stores the complete write handshake data.
// While reading, it stores the handshake information received so far.
std::string buffer_;
// This becomes true when the SOCKS handshake has completed and the
// overlying connection is free to communicate.
bool completed_handshake_;
  // Number of handshake bytes sent so far from |buffer_|.
size_t bytes_sent_;
size_t read_header_size_;
bool was_ever_used_;
SocksEndPointAddressType address_type_;
int address_size_;
std::string user_;
std::string pass_;
char auth_method_;
char auth_status_;
char reply_;
HostPortPair request_endpoint_;
NetLogWithSource net_log_;
// Traffic annotation for socket control.
const NetworkTrafficAnnotationTag& traffic_annotation_;
};
} // namespace net
#endif // NET_TOOLS_NAIVE_SOCKS5_SERVER_SOCKET_H_

tests/basic.py Normal file

@ -0,0 +1,266 @@
#!/usr/bin/env python3
import argparse
import http.server
import os
import shutil
import ssl
import subprocess
import sys
import tempfile
import threading
import time
parser = argparse.ArgumentParser()
parser.add_argument('--naive', required=True)
parser.add_argument('--rootfs')
parser.add_argument('--target_cpu')
argv = parser.parse_args()
if argv.rootfs:
try:
os.remove(os.path.join(argv.rootfs, 'naive'))
except OSError:
pass
_, certfile = tempfile.mkstemp()
result = subprocess.run(
f'openssl req -new -x509 -keyout {certfile} -out {certfile} -days 1 -nodes -subj /C=XX'.split(), capture_output=True)
result.check_returncode()
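# Serve a local HTTPS endpoint with the throwaway self-signed certificate;
# every test below fetches /404 from it through the proxy chain under test.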
HTTPS_SERVER_HOSTNAME = '127.0.0.1'
HTTP_SERVER_PORT = 60443
ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ssl_context.load_cert_chain(certfile=certfile)
httpd = http.server.HTTPServer(
(HTTPS_SERVER_HOSTNAME, HTTP_SERVER_PORT), http.server.SimpleHTTPRequestHandler)
httpd.timeout = 1
httpd.allow_reuse_address = True
httpd.socket = ssl_context.wrap_socket(httpd.socket, server_side=True)
httpd_thread = threading.Thread(
target=lambda httpd: httpd.serve_forever(), args=(httpd,), daemon=True)
httpd_thread.start()
def test_https_server(hostname, port, proxy=None):
url = f'https://{hostname}:{port}/404'
cmdline = ['curl', '-k', '-s']
if proxy:
cmdline.extend(['--proxy', proxy])
cmdline.append(url)
print('subprocess.run', ' '.join(cmdline))
result = subprocess.run(cmdline, capture_output=True,
timeout=1, text=True, encoding='utf-8')
print(result.stderr, end='')
return 'Error code: 404' in result.stdout
assert test_https_server(HTTPS_SERVER_HOSTNAME,
HTTP_SERVER_PORT), 'https server not up'
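# Starts one naive instance, optionally inside bwrap (sysroot builds) or
# qemu-user (cross-compiled targets), and waits until it logs either
# 'Listening on' (ready) or 'Failed to listen' (e.g. a port clash, which the
# caller retries with new ports).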
def start_naive(naive_args):
with_qemu = None
if argv.target_cpu == 'arm64':
with_qemu = 'aarch64'
elif argv.target_cpu == 'arm':
with_qemu = 'arm'
elif argv.target_cpu == 'mipsel':
with_qemu = 'mipsel'
elif argv.target_cpu == 'mips64el':
with_qemu = 'mips64el'
if argv.rootfs:
if not with_qemu:
if not os.path.exists(os.path.join(argv.rootfs, 'naive')):
shutil.copy2(argv.naive, argv.rootfs)
cmdline = ['bwrap', '--die-with-parent', '--bind', argv.rootfs, '/',
'--proc', '/proc', '--dev', '/dev', '/naive']
else:
cmdline = [f'qemu-{with_qemu}', '-L', argv.rootfs, argv.naive]
else:
cmdline = [argv.naive]
cmdline.extend(naive_args)
proc = subprocess.Popen(cmdline, stdout=subprocess.DEVNULL,
stderr=subprocess.PIPE, text=True, encoding='utf-8')
print('subprocess.Popen', ' '.join(cmdline), 'pid:', proc.pid)
def terminate(proc):
print('proc has timed out')
print('terminate pid', proc.pid)
proc.terminate()
timeout = threading.Timer(10, terminate, args=(proc,))
timeout.start()
while True:
if proc.poll() is not None:
timeout.cancel()
return proc.poll() == 0
line = proc.stderr.readline().strip()
print(line)
if 'Failed to listen: ' in line:
timeout.cancel()
print('terminate pid', proc.pid)
proc.terminate()
return 'Failed to listen'
elif 'Listening on ' in line:
timeout.cancel()
return proc
port = 10000
def allocate_port_number():
global port
port += 1
if port > 60000:
port = 10000
return port
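# Runs one naive instance per argument string and checks that curl can reach
# the local HTTPS server through `proxy`. {PORT1}, {PORT2}, ... placeholders
# in the proxy string, arguments, and config content are replaced with freshly
# allocated port numbers via PortDict and str.format_map.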
def test_naive_once(proxy, *args, **kwargs):
port_map = {}
class PortDict(dict):
def __init__(self, port_map):
self._port_map = port_map
def __getitem__(self, key):
if key.startswith('PORT'):
if key not in self._port_map:
self._port_map[key] = str(allocate_port_number())
return self._port_map[key]
return key
port_dict = PortDict(port_map)
proxy = proxy.format_map(port_dict)
config_file = kwargs.get('config_file', 'config.json')
if argv.rootfs:
config_file = os.path.join(argv.rootfs, config_file)
config_content = kwargs.get('config_content')
if config_content is not None:
config_content = config_content.format_map(port_dict)
with open(config_file, 'w') as f:
f.write('{')
f.write(config_content)
f.write('}')
naive_procs = []
def cleanup():
if config_content is not None:
os.remove(config_file)
for naive_proc in naive_procs:
print('terminate pid', naive_proc.pid)
naive_proc.terminate()
for args_instance in args:
naive_args = args_instance.format_map(port_dict).split()
naive_proc = start_naive(naive_args)
if naive_proc == 'Failed to listen':
cleanup()
return 'Failed to listen'
if not naive_proc:
cleanup()
return False
naive_procs.append(naive_proc)
result = test_https_server(HTTPS_SERVER_HOSTNAME, HTTP_SERVER_PORT, proxy)
cleanup()
return result
def test_naive(label, proxy, *args, **kwargs):
RETRIES = 5
for i in range(RETRIES):
result = test_naive_once(proxy, *args, **kwargs)
if result == 'Failed to listen':
print('Retrying...')
time.sleep(1)
continue
        if result:
            print('** TEST PASS:', label, end='\n\n')
            return True
        break
    print('** TEST FAIL:', label, end='\n\n')
    sys.exit(1)
test_naive('Default config', 'socks5h://127.0.0.1:1080',
'--log')
test_naive('Default config file', 'socks5h://127.0.0.1:{PORT1}',
'',
config_content='"listen":"socks://127.0.0.1:{PORT1}","log":""')
test_naive('Custom config file', 'socks5h://127.0.0.1:{PORT1}',
'custom.json',
config_content='"listen":"socks://127.0.0.1:{PORT1}","log":""',
config_file='custom.json')
test_naive('Trivial - listen scheme only', 'socks5h://127.0.0.1:1080',
'--log --listen=socks://')
test_naive('Trivial - listen no host', 'socks5h://127.0.0.1:{PORT1}',
'--log --listen=socks://:{PORT1}')
test_naive('Trivial - listen no port', 'socks5h://127.0.0.1:1080',
'--log --listen=socks://127.0.0.1')
test_naive('Trivial - auth', 'socks5h://user:pass@127.0.0.1:{PORT1}',
'--log --listen=socks://user:pass@127.0.0.1:{PORT1}')
test_naive('Trivial - auth with special chars', 'socks5h://user:^@127.0.0.1:{PORT1}',
'--log --listen=socks://user:^@127.0.0.1:{PORT1}')
test_naive('Trivial - auth with special chars', 'socks5h://^:^@127.0.0.1:{PORT1}',
'--log --listen=socks://^:^@127.0.0.1:{PORT1}')
test_naive('Trivial - auth with empty pass', 'socks5h://user:@127.0.0.1:{PORT1}',
'--log --listen=socks://user:@127.0.0.1:{PORT1}')
test_naive('SOCKS-SOCKS', 'socks5h://127.0.0.1:{PORT1}',
'--log --listen=socks://:{PORT1} --proxy=socks://127.0.0.1:{PORT2}',
'--log --listen=socks://:{PORT2}')
test_naive('SOCKS-SOCKS - proxy no port', 'socks5h://127.0.0.1:{PORT1}',
'--log --listen=socks://:{PORT1} --proxy=socks://127.0.0.1',
'--log --listen=socks://:1080')
test_naive('SOCKS-HTTP', 'socks5h://127.0.0.1:{PORT1}',
'--log --listen=socks://:{PORT1} --proxy=http://127.0.0.1:{PORT2}',
'--log --listen=http://:{PORT2}')
test_naive('HTTP-HTTP', 'http://127.0.0.1:{PORT1}',
'--log --listen=http://:{PORT1} --proxy=http://127.0.0.1:{PORT2}',
'--log --listen=http://:{PORT2}')
test_naive('HTTP-SOCKS', 'http://127.0.0.1:{PORT1}',
'--log --listen=http://:{PORT1} --proxy=socks://127.0.0.1:{PORT2}',
'--log --listen=socks://:{PORT2}')
test_naive('SOCKS-SOCKS-SOCKS', 'socks5h://127.0.0.1:{PORT1}',
'--log --listen=socks://:{PORT1} --proxy=socks://127.0.0.1:{PORT2}',
'--log --listen=socks://:{PORT2} --proxy=socks://127.0.0.1:{PORT3}',
'--log --listen=socks://:{PORT3}')
test_naive('SOCKS-HTTP-SOCKS', 'socks5h://127.0.0.1:{PORT1}',
'--log --listen=socks://:{PORT1} --proxy=http://127.0.0.1:{PORT2}',
'--log --listen=http://:{PORT2} --proxy=socks://127.0.0.1:{PORT3}',
'--log --listen=socks://:{PORT3}')
test_naive('HTTP-SOCKS-HTTP', 'http://127.0.0.1:{PORT1}',
'--log --listen=http://:{PORT1} --proxy=socks://127.0.0.1:{PORT2}',
'--log --listen=socks://:{PORT2} --proxy=http://127.0.0.1:{PORT3}',
'--log --listen=http://:{PORT3}')
test_naive('HTTP-HTTP-HTTP', 'http://127.0.0.1:{PORT1}',
'--log --listen=http://:{PORT1} --proxy=http://127.0.0.1:{PORT2}',
'--log --listen=http://:{PORT2} --proxy=http://127.0.0.1:{PORT3}',
'--log --listen=http://:{PORT3}')

tests/basic.sh Executable file

@ -0,0 +1,157 @@
#!/bin/sh
set -ex
script_dir=$(dirname "$PWD/$0")
[ "$1" ] || exit 1
naive="$PWD/$1"
. ./get-sysroot.sh
if [ "$WITH_ANDROID_IMG" ]; then
rootfs="$PWD/out/sysroot-build/android/$WITH_ANDROID_IMG"
elif [ "$WITH_SYSROOT" ]; then
rootfs="$PWD/$WITH_SYSROOT"
fi
cd /tmp
python3 "$script_dir"/basic.py --naive="$naive" --rootfs="$rootfs" --target_cpu="$target_cpu"
exit $?
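# Note: the script exits above, so the older shell-only harness below is no
# longer executed; basic.py supersedes it.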
if [ "$WITH_SYSROOT" -a "$WITH_QEMU" ]; then
naive="qemu-$WITH_QEMU -L $PWD/$WITH_SYSROOT $naive"
fi
if [ "$WITH_ANDROID_IMG" -a "$WITH_QEMU" ]; then
naive="qemu-$WITH_QEMU -L $PWD/out/sysroot-build/android/$WITH_ANDROID_IMG $naive"
fi
cd /tmp
MSYS_NO_PATHCONV=1 openssl req -new -x509 -keyout server.pem -out server.pem -days 1 -nodes -subj '/C=XX'
cat >server.py <<EOF
import http.server, ssl
httpd = http.server.HTTPServer(('127.0.0.1', 60443), http.server.SimpleHTTPRequestHandler)
httpd.socket = ssl.wrap_socket(httpd.socket, server_side=True, certfile='server.pem')
httpd.serve_forever()
EOF
echo Hello >hello.txt
python3=$(which python3 2>/dev/null || which python 2>/dev/null)
$python3 server.py &
trap "rm -f server.py server.pem hello.txt; kill $!" EXIT
alias curl='curl -v --retry-connrefused --retry-delay 1 --retry 5'
curl -k https://127.0.0.1:60443/hello.txt
test_proxy() {
curl --proxy "$1" -k https://127.0.0.1:60443/hello.txt | grep 'Hello'
}
test_naive() {
test_name="$1"
proxy="$2"
echo "TEST '$test_name':"
shift 2
if (
trap 'kill $pid' EXIT
pid=
for arg in "$@"; do
name=naive$(echo "$arg" | tr -c 0-9a-z _)
$naive $arg 2>$name.log & pid="$pid $!"
tail -f $name.log & pid="$pid $!"
for i in $(seq 10); do
if grep -q 'Listening on' $name.log; then
break
fi
if [ $i -eq 10 ]; then
echo Timeout to start naive
ss -ntlp
exit 1
fi
sleep 1
done
done
test_proxy "$proxy"
); then
echo "TEST '$test_name': PASS"
true
else
echo "TEST '$test_name': FAIL"
false
fi
}
test_naive 'Default config' socks5h://127.0.0.1:1080 '--log'
echo '{"listen":"socks://127.0.0.1:60101","log":""}' >config.json
test_naive 'Default config file' socks5h://127.0.0.1:60101 ''
rm -f config.json
echo '{"listen":"socks://127.0.0.1:60201","log":""}' >/tmp/config.json
test_naive 'Config file' socks5h://127.0.0.1:60201 '/tmp/config.json'
rm -f /tmp/config.json
test_naive 'Trivial - listen scheme only' socks5h://127.0.0.1:1080 \
'--log --listen=socks://'
test_naive 'Trivial - listen no host' socks5h://127.0.0.1:60301 \
'--log --listen=socks://:60301'
test_naive 'Trivial - listen no port' socks5h://127.0.0.1:1080 \
'--log --listen=socks://127.0.0.1'
test_naive 'Trivial - auth' socks5h://user:pass@127.0.0.1:60311 \
'--log --listen=socks://user:pass@127.0.0.1:60311'
test_naive 'Trivial - auth with special chars' socks5h://user:^@127.0.0.1:60312 \
'--log --listen=socks://user:^@127.0.0.1:60312'
test_naive 'Trivial - auth with special chars' socks5h://^:^@127.0.0.1:60313 \
'--log --listen=socks://^:^@127.0.0.1:60313'
test_naive 'Trivial - auth with empty pass' socks5h://user:@127.0.0.1:60314 \
'--log --listen=socks://user:@127.0.0.1:60314'
test_naive 'SOCKS-SOCKS' socks5h://127.0.0.1:60401 \
'--log --listen=socks://:60401 --proxy=socks://127.0.0.1:60402' \
'--log --listen=socks://:60402'
test_naive 'SOCKS-SOCKS - proxy no port' socks5h://127.0.0.1:60501 \
'--log --listen=socks://:60501 --proxy=socks://127.0.0.1' \
'--log --listen=socks://:1080'
test_naive 'SOCKS-HTTP' socks5h://127.0.0.1:60601 \
'--log --listen=socks://:60601 --proxy=http://127.0.0.1:60602' \
'--log --listen=http://:60602'
test_naive 'HTTP-HTTP' http://127.0.0.1:60701 \
'--log --listen=http://:60701 --proxy=http://127.0.0.1:60702' \
'--log --listen=http://:60702'
test_naive 'HTTP-SOCKS' http://127.0.0.1:60801 \
'--log --listen=http://:60801 --proxy=socks://127.0.0.1:60802' \
'--log --listen=socks://:60802'
test_naive 'SOCKS-HTTP padded' socks5h://127.0.0.1:60901 \
'--log --listen=socks://:60901 --proxy=http://127.0.0.1:60902 --padding' \
'--log --listen=http://:60902 --padding'
test_naive 'SOCKS-SOCKS-SOCKS' socks5h://127.0.0.1:61001 \
'--log --listen=socks://:61001 --proxy=socks://127.0.0.1:61002' \
'--log --listen=socks://:61002 --proxy=socks://127.0.0.1:61003' \
'--log --listen=socks://:61003'
test_naive 'SOCKS-HTTP-SOCKS' socks5h://127.0.0.1:61101 \
'--log --listen=socks://:61101 --proxy=http://127.0.0.1:61102' \
'--log --listen=http://:61102 --proxy=socks://127.0.0.1:61103' \
'--log --listen=socks://:61103'
test_naive 'HTTP-SOCKS-HTTP' http://127.0.0.1:61201 \
'--log --listen=http://:61201 --proxy=socks://127.0.0.1:61202' \
'--log --listen=socks://:61202 --proxy=http://127.0.0.1:61203' \
'--log --listen=http://:61203'
test_naive 'HTTP-HTTP-HTTP' http://127.0.0.1:61301 \
'--log --listen=http://:61301 --proxy=http://127.0.0.1:61302' \
'--log --listen=http://:61302 --proxy=http://127.0.0.1:61303' \
'--log --listen=http://:61303'
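
For reference, the wrapper above expects a single argument, the path to a built naive binary, and must run from the directory that contains get-sysroot.sh; a plausible local invocation (a rough sketch, the out/Release output path is an assumption) is:

  ./get-clang.sh
  ./build.sh
  /path/to/tests/basic.sh out/Release/naive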

tools/build_test.sh Executable file

@ -0,0 +1,101 @@
#!/bin/sh
set -ex
cd src
unset EXTRA_FLAGS
unset OPENWRT_FLAGS
ccache -C
./get-clang.sh
for i in x64 x86 arm64 arm mipsel mips64el; do
unset EXTRA_FLAGS
unset OPENWRT_FLAGS
export EXTRA_FLAGS="target_cpu=\"$i\""
./get-clang.sh
done
for i in x64 x86 arm64 arm; do
unset EXTRA_FLAGS
unset OPENWRT_FLAGS
export EXTRA_FLAGS="target_cpu=\"$i\" target_os=\"android\""
./get-clang.sh
done
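# config_openwrt <arch> '<target=... subtarget=...>' <gn target_cpu> ['<extra gn args>']
# Exports EXTRA_FLAGS/OPENWRT_FLAGS for one OpenWrt 19.07 target and fetches
# the matching toolchain and sysroot via ./get-clang.sh; no build happens yet.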
config_openwrt() {
arch="$1"
openwrt="$2"
target_cpu="$3"
extra="$4"
export EXTRA_FLAGS="target_cpu=\"$target_cpu\" target_os=\"openwrt\" use_allocator=\"none\" use_allocator_shim=false $extra"
export OPENWRT_FLAGS="arch=$arch release=19.07.7 gcc_ver=7.5.0 $openwrt"
./get-clang.sh
}
config_openwrt x86_64 'target=x86 subtarget=64' x64
config_openwrt x86 'target=x86 subtarget=generic' x86
config_openwrt aarch64_cortex-a53 'target=sunxi subtarget=cortexa53' arm64 'arm_version=0 arm_cpu="cortex-a53"'
config_openwrt aarch64_cortex-a72 'target=mvebu subtarget=cortexa72' arm64 'arm_version=0 arm_cpu="cortex-a72"'
config_openwrt aarch64_generic 'target=armvirt subtarget=64' arm64
config_openwrt arm_cortex-a5_vfpv4 'target=at91 subtarget=sama5' arm 'arm_version=0 arm_cpu="cortex-a5" arm_fpu="vfpv4" arm_float_abi="hard" arm_use_neon=false'
config_openwrt arm_cortex-a7_neon-vfpv4 'target=sunxi subtarget=cortexa7' arm 'arm_version=0 arm_cpu="cortex-a7" arm_fpu="neon-vfpv4" arm_float_abi="hard" arm_use_neon=true'
config_openwrt arm_cortex-a8_neon 'target=samsung subtarget=s5pv210' arm 'arm_version=0 arm_cpu="cortex-a8" arm_fpu="neon" arm_float_abi="hard" arm_use_neon=true'
config_openwrt arm_cortex-a8_vfpv3 'target=sunxi subtarget=cortexa8' arm 'arm_version=0 arm_cpu="cortex-a8" arm_fpu="vfpv3" arm_float_abi="hard" arm_use_neon=false'
config_openwrt arm_cortex-a9 'target=bcm53xx' arm 'arm_version=0 arm_cpu="cortex-a9" arm_float_abi="soft" arm_use_neon=false'
config_openwrt arm_cortex-a9_neon 'target=imx6' arm 'arm_version=0 arm_cpu="cortex-a9" arm_fpu="neon" arm_float_abi="hard" arm_use_neon=true'
config_openwrt arm_cortex-a9_vfpv3-d16 'target=tegra' arm 'arm_version=0 arm_cpu="cortex-a9" arm_fpu="vfpv3-d16" arm_float_abi="hard" arm_use_neon=false'
config_openwrt arm_cortex-a15_neon-vfpv4 'target=armvirt subtarget=32' arm 'arm_version=0 arm_cpu="cortex-a15" arm_fpu="neon-vfpv4" arm_float_abi="hard" arm_use_neon=true'
config_openwrt mipsel_24kc 'target=ramips subtarget=rt305x' mipsel 'mips_arch_variant="r2" mips_float_abi="soft" mips_tune="24kc" use_lld=false use_gold=false'
config_openwrt mipsel_74kc 'target=ramips subtarget=rt3883' mipsel 'mips_arch_variant="r2" mips_float_abi="soft" mips_tune="74kc" use_lld=false use_gold=false'
config_openwrt mipsel_mips32 'target=rb532' mipsel 'mips_arch_variant="r1" mips_float_abi="soft" use_lld=false use_gold=false'
rm -f /tmp/trace
inotifywait -m -r -o/tmp/trace --format '%w%f %e' . &
pid=$!
unset EXTRA_FLAGS
unset OPENWRT_FLAGS
./build.sh
for i in x64 x86 arm64 arm mipsel mips64el; do
unset EXTRA_FLAGS
unset OPENWRT_FLAGS
export EXTRA_FLAGS="target_cpu=\"$i\""
./build.sh
done
for i in x64 x86 arm64 arm; do
unset EXTRA_FLAGS
unset OPENWRT_FLAGS
export EXTRA_FLAGS="target_cpu=\"$i\" target_os=\"android\""
./build.sh
done
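# build_openwrt takes the same arguments as config_openwrt above, but runs a
# full ./build.sh for the target while inotifywait traces the files accessed.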
build_openwrt() {
arch="$1"
openwrt="$2"
target_cpu="$3"
extra="$4"
export EXTRA_FLAGS="target_cpu=\"$target_cpu\" target_os=\"openwrt\" use_allocator=\"none\" use_allocator_shim=false $extra"
export OPENWRT_FLAGS="arch=$arch release=19.07.7 gcc_ver=7.5.0 $openwrt"
./build.sh
}
build_openwrt x86_64 'target=x86 subtarget=64' x64
build_openwrt x86 'target=x86 subtarget=generic' x86
build_openwrt aarch64_cortex-a53 'target=sunxi subtarget=cortexa53' arm64 'arm_version=0 arm_cpu="cortex-a53"'
build_openwrt aarch64_cortex-a72 'target=mvebu subtarget=cortexa72' arm64 'arm_version=0 arm_cpu="cortex-a72"'
build_openwrt aarch64_generic 'target=armvirt subtarget=64' arm64
build_openwrt arm_cortex-a5_vfpv4 'target=at91 subtarget=sama5' arm 'arm_version=0 arm_cpu="cortex-a5" arm_fpu="vfpv4" arm_float_abi="hard" arm_use_neon=false'
build_openwrt arm_cortex-a7_neon-vfpv4 'target=sunxi subtarget=cortexa7' arm 'arm_version=0 arm_cpu="cortex-a7" arm_fpu="neon-vfpv4" arm_float_abi="hard" arm_use_neon=true'
build_openwrt arm_cortex-a8_neon 'target=samsung subtarget=s5pv210' arm 'arm_version=0 arm_cpu="cortex-a8" arm_fpu="neon" arm_float_abi="hard" arm_use_neon=true'
build_openwrt arm_cortex-a8_vfpv3 'target=sunxi subtarget=cortexa8' arm 'arm_version=0 arm_cpu="cortex-a8" arm_fpu="vfpv3" arm_float_abi="hard" arm_use_neon=false'
build_openwrt arm_cortex-a9 'target=bcm53xx' arm 'arm_version=0 arm_cpu="cortex-a9" arm_float_abi="soft" arm_use_neon=false'
build_openwrt arm_cortex-a9_neon 'target=imx6' arm 'arm_version=0 arm_cpu="cortex-a9" arm_fpu="neon" arm_float_abi="hard" arm_use_neon=true'
build_openwrt arm_cortex-a9_vfpv3-d16 'target=tegra' arm 'arm_version=0 arm_cpu="cortex-a9" arm_fpu="vfpv3-d16" arm_float_abi="hard" arm_use_neon=false'
build_openwrt arm_cortex-a15_neon-vfpv4 'target=armvirt subtarget=32' arm 'arm_version=0 arm_cpu="cortex-a15" arm_fpu="neon-vfpv4" arm_float_abi="hard" arm_use_neon=true'
build_openwrt mipsel_24kc 'target=ramips subtarget=rt305x' mipsel 'mips_arch_variant="r2" mips_float_abi="soft" mips_tune="24kc" use_lld=false use_gold=false'
build_openwrt mipsel_74kc 'target=ramips subtarget=rt3883' mipsel 'mips_arch_variant="r2" mips_float_abi="soft" mips_tune="74kc" use_lld=false use_gold=false'
build_openwrt mipsel_mips32 'target=rb532' mipsel 'mips_arch_variant="r1" mips_float_abi="soft" use_lld=false use_gold=false'
kill $pid

tools/build_test_stats.sh Executable file

@ -0,0 +1,5 @@
#!/bin/sh
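# Merges the inotifywait traces written by runs of tools/build_test.sh into a
# sorted, de-duplicated list of files the builds actually touched
# (/tmp/detected-files), presumably to help trim tools/include.txt.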
for i in /tmp/trace.*; do
cut -d' ' -f1 $i | LC_ALL=C sort -u | sed 's/\/$//' | LC_ALL=C sort -u >$i.sorted
done
cat /tmp/trace.*.sorted | LC_ALL=C sort -u >/tmp/detected-files

tools/exclude.txt Normal file

@ -0,0 +1,21 @@
.gitignore
*_unittest.cc
*_unittest.mm
*_unittest.nc
*_perftest.cc
*_test.cc
*fuzz*
*org/chromium*
*.golden
*.javap*
*.pyc
net/data/[!s]*
net/data/s[!s]*
net/data/ssl/[!ce]*
net/data/ssl/c[!h]*
net/http/transport_security_state_static.json
net/third_party/nist-pkits
third_party/boringssl/src/crypto/hpke/test-vectors.json
third_party/boringssl/src/crypto/cipher_extra/test
third_party/boringssl/src/third_party/googletest
third_party/boringssl/src/third_party/wycheproof_testvectors

tools/import-upstream.sh Executable file

@ -0,0 +1,28 @@
#!/bin/sh
set -ex
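# Re-imports the Chromium release pinned in CHROMIUM_VERSION: the tarball is
# unpacked onto the repository's root commit, filtered through
# tools/include.txt and tools/exclude.txt, committed as src/, and the local
# branch is then rebased on top of the new import.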
have_version=$(cut -d= -f2 src/chrome/VERSION | tr '\n' . | cut -d. -f1-4)
want_version=$(cat CHROMIUM_VERSION)
if [ "$have_version" = "$want_version" ]; then
exit 0
fi
name="chromium-$want_version"
tarball="$name.tar.xz"
url="https://commondatastorage.googleapis.com/chromium-browser-official/$tarball"
root=$(git rev-list --max-parents=0 HEAD)
branch=$(git branch --show-current)
git config core.autocrlf false
git config core.safecrlf false
git -c advice.detachedHead=false checkout $root
rm -rf src
git checkout "$branch" -- tools
sed -i "s/^\^/$name\//" tools/include.txt
if [ -f "/tmp/$tarball" ]; then
cat "/tmp/$tarball" | tar xJf - --wildcards --wildcards-match-slash -T tools/include.txt -X tools/exclude.txt
else
curl "$url" -o- | tar xJf - --wildcards --wildcards-match-slash -T tools/include.txt -X tools/exclude.txt
fi
mv "$name" src
git rm --quiet --force -r tools
git add src
git commit --quiet --amend -m "Import $name" --date=now
git rebase --onto HEAD "$root" "$branch"

tools/include.txt Normal file

@ -0,0 +1,67 @@
^.clang-format
^.gitattributes
^.gitignore
^.gn
^AUTHORS
^BUILD.gn
^DEPS
^LICENSE
^base
^build
^build_overrides/build.gni
^buildtools/deps_revisions.gni
^buildtools/third_party/eu-strip/bin/eu-strip
^buildtools/third_party/libc++/BUILD.gn
^buildtools/third_party/libc++/__config_site
^buildtools/third_party/libc++/trunk/include
^buildtools/third_party/libc++/trunk/src
^buildtools/third_party/libc++abi/BUILD.gn
^buildtools/third_party/libc++abi/cxa_demangle_stub.cc
^buildtools/third_party/libc++abi/trunk/include
^buildtools/third_party/libc++abi/trunk/src
^buildtools/third_party/libunwind
^chrome/VERSION
^chrome/android/profiles/newest.txt
^chrome/app/theme/chromium/BRANDING
^chrome/build/*.txt
^components/cronet
^components/grpc_support
^components/prefs
^components/version_info
^crypto
^ipc/ipc_param_traits.h
^net
^testing/gtest/include/gtest/gtest_prod.h
^third_party/abseil-cpp
^third_party/angle/dotfile_settings.gni
^third_party/angle/src/commit_id.py
^third_party/angle/scripts/file_exists.py
^third_party/apple_apsl
^third_party/ashmem
^third_party/boringssl
^third_party/brotli
^third_party/closure_compiler/closure_args.gni
^third_party/closure_compiler/compile_js.gni
^third_party/depot_tools/cpplint.py
^third_party/depot_tools/download_from_google_storage.py
^third_party/depot_tools/subprocess2.py
^third_party/googletest/BUILD.gn
^third_party/googletest/src/googletest/include/gtest/gtest_prod.h
^third_party/icu/config.gni
^third_party/lss/linux_syscall_support.h
^third_party/modp_b64
^third_party/nasm
^third_party/perfetto/include/perfetto/tracing/traced_value_forward.h
^third_party/protobuf/BUILD.gn
^third_party/protobuf/proto_library.gni
^third_party/protobuf/src
^third_party/zlib
^tools/cfi
^tools/clang/scripts/update.py
^tools/diagnosis
^tools/grit
^tools/gritsettings
^tools/protoc_wrapper
^tools/update_pgo_profiles.py
^tools/win/DebugVisualizers
^url

tools/list-openwrt.sh Normal file

@ -0,0 +1,60 @@
#!/bin/sh
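# Cross-checks the architectures declared in the OpenWrt source tree against
# the targets published on downloads.openwrt.org for $version, then prints one
# markdown table row per architecture; targets without a published SDK are
# struck through.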
# $version can be 21.02 or 19.07.
version=19.07.7
if [ ! -d /tmp/openwrt ]; then
cd /tmp
git clone https://github.com/openwrt/openwrt.git
cd openwrt
fi
cd /tmp/openwrt
git -c advice.detachedHead=false checkout v$version
export TOPDIR=$PWD
cd target/linux
>targets.git
for target in *; do
[ -d $target ] || continue
subtargets=$(make -C $target --no-print-directory DUMP=1 TARGET_BUILD=1 val.SUBTARGETS 2>/dev/null)
[ "$subtargets" ] || subtargets=generic
for subtarget in $subtargets; do
echo $(make -C $target --no-print-directory DUMP=1 TARGET_BUILD=1 SUBTARGET=$subtarget 2>/dev/null | egrep '^(Target:|Target-Arch-Packages:)' | cut -d: -f2) >>targets.git
done
done
targets=$(curl -s https://downloads.openwrt.org/releases/$version/targets/ | grep '<td class="n"><a href=' | cut -d'"' -f4 | sed 's,/,,')
>targets.sdk
for target in $targets; do
subtargets=$(curl -s https://downloads.openwrt.org/releases/$version/targets/$target/ | grep '<td class="n"><a href=' | cut -d'"' -f4 | sed 's,/,,')
for subtarget in $subtargets; do
arch=$(curl -s https://downloads.openwrt.org/releases/$version/targets/$target/$subtarget/profiles.json | grep arch_packages | cut -d'"' -f4)
echo $target/$subtarget $arch >>targets.sdk
done
done
cat >parse-targets.py <<EOF
arch_by_target_git = {}
arch_by_target_sdk = {}
for line in open('targets.git'):
fields = line.split()
if not fields:
continue
arch_by_target_git[fields[0]] = fields[1]
for line in open('targets.sdk'):
fields = line.split()
if len(fields) == 2:
if arch_by_target_git[fields[0]] != fields[1]:
raise Exception(line + ': wrong arch')
arch_by_target_sdk[fields[0]] = fields[1]
else:
arch_by_target_sdk[fields[0]] = ''
for arch in sorted(set(arch_by_target_git.values())):
targets = []
for t in arch_by_target_git:
if arch_by_target_git[t] != arch:
continue
if t in arch_by_target_sdk:
targets.append(t)
else:
targets.append('~~' + t + '~~')
print('|', arch, '|?|', ' '.join(sorted(set(targets))), '|')
EOF
python3 parse-targets.py