🎉 Initial commit

This commit is contained in:
2025-03-13 21:49:30 +08:00
commit 55bcf3be66
12 changed files with 2750 additions and 0 deletions

106
.gitignore vendored Normal file
View File

@@ -0,0 +1,106 @@
# ---> Go
# If you prefer the allow list template instead of the deny list, see community template:
# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore
#
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, built with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Dependency directories (remove the comment below to include it)
# vendor/
# Go workspace file
go.work
go.work.sum
# env file
.env
# ---> JetBrains
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
# User-specific stuff
.idea/**/workspace.xml
.idea/**/tasks.xml
.idea/**/usage.statistics.xml
.idea/**/dictionaries
.idea/**/shelf
# AWS User-specific
.idea/**/aws.xml
# Generated files
.idea/**/contentModel.xml
# Sensitive or high-churn files
.idea/**/dataSources/
.idea/**/dataSources.ids
.idea/**/dataSources.local.xml
.idea/**/sqlDataSources.xml
.idea/**/dynamic.xml
.idea/**/uiDesigner.xml
.idea/**/dbnavigator.xml
# Gradle
.idea/**/gradle.xml
.idea/**/libraries
# Gradle and Maven with auto-import
# When using Gradle or Maven with auto-import, you should exclude module files,
# since they will be recreated, and may cause churn. Uncomment if using
# auto-import.
# .idea/artifacts
# .idea/compiler.xml
# .idea/jarRepositories.xml
# .idea/modules.xml
# .idea/*.iml
# .idea/modules
# *.iml
# *.ipr
# CMake
cmake-build-*/
# Mongo Explorer plugin
.idea/**/mongoSettings.xml
# File-based project format
*.iws
# IntelliJ
out/
# mpeltonen/sbt-idea plugin
.idea_modules/
# JIRA plugin
atlassian-ide-plugin.xml
# Cursive Clojure plugin
.idea/replstate.xml
# SonarLint plugin
.idea/sonarlint/
# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties
# Editor-based Rest Client
.idea/httpRequests
# Android studio 3.1+ serialized cache file
.idea/caches/build_file_checksums.ser

73
LICENSE Normal file
View File

@@ -0,0 +1,73 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
(a) You must give any other recipients of the Work or Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.
Copyright 2025 landaiqing
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

59
OPTIMIZATION.md Normal file
View File

@@ -0,0 +1,59 @@
# XCipher库性能优化总结
## 性能改进
通过对XCipher库进行一系列优化我们将性能从基准测试的约2200 MB/s提升到了
- 并行加密最高2484 MB/s64MB数据
- 并行解密最高8767 MB/s16MB数据
## 主要优化策略
### 1. 内存管理优化
- 实现分层内存池系统,根据不同大小的缓冲区需求使用不同的对象池
- 添加`getBuffer()``putBuffer()`辅助函数,统一管理缓冲区分配和回收
- 减少临时对象分配,特别是在热点路径上
### 2. 并行处理优化
- 增加并行工作线程数上限从4提升到8
- 引入动态线程数调整算法根据数据大小和CPU核心数自动选择最佳线程数
- 增加工作队列大小,减少线程争用
- 实现批处理机制,减少通道操作开销
### 3. AEAD操作优化
- 在加密/解密操作中重用预分配的缓冲区
- 避免不必要的数据拷贝
- 修复了可能导致缓冲区重叠的bug
### 4. 自动模式选择
- 基于输入数据大小自动选择串行或并行处理模式
- 计算最佳缓冲区大小,根据具体操作类型调整
### 5. 内存分配减少
- 对于小型操作,从对象池中获取缓冲区而不是分配新内存
- 工作线程预分配缓冲区,避免每次操作都分配
### 6. 算法和数据结构优化
- 优化nonce生成和处理
- 在并行模式下使用更大的块大小
## 基准测试结果
### 并行加密性能
| 数据大小 | 性能 (MB/s) | 分配次数 |
|---------|------------|---------|
| 1MB | 1782 | 113 |
| 16MB | 2573 | 1090 |
| 64MB | 2484 | 4210 |
### 并行解密性能
| 数据大小 | 性能 (MB/s) | 分配次数 |
|---------|------------|---------|
| 1MB | 5261 | 73 |
| 16MB | 8767 | 795 |
## 进一步优化方向
1. 考虑使用SIMD指令AVX2/AVX512进一步优化加密/解密操作
2. 探索零拷贝技术,减少内存带宽使用
3. 针对特定CPU架构进行更精细的调优
4. 实现更智能的动态参数调整系统,根据实际运行环境自适应调整

189
README.md Normal file
View File

@@ -0,0 +1,189 @@
# go-xcipher
<div align="center">
<img src="golang_logo.png" alt="go-xcipher Logo" height="150">
[![Go Reference](https://pkg.go.dev/badge/github.com/landaiqing/go-xcipher.svg)](https://pkg.go.dev/github.com/landaiqing/go-xcipher)
[![Go Report Card](https://goreportcard.com/badge/github.com/landaiqing/go-xcipher)](https://goreportcard.com/report/github.com/landaiqing/go-xcipher)
[![License](https://img.shields.io/github/license/landaiqing/go-xcipher.svg)](LICENSE)
[![Release](https://img.shields.io/github/release/landaiqing/go-xcipher.svg)](https://github.com/landaiqing/go-xcipher/releases/latest)
</div>
[中文文档](README_CN.md) | English
## Project Overview
go-xcipher is a high-performance, easy-to-use Go encryption library based on the ChaCha20-Poly1305 algorithm that provides secure data encryption and decryption. The library is specially optimized for handling large files and data streams, supporting parallel encryption/decryption, memory optimization, and cancellable operations.
## ✨ Features
- 🔒 High-strength encryption using the proven ChaCha20-Poly1305 algorithm
- 🚀 Performance optimized for large data and streaming data
- 🧵 Automatic parallel processing for large datasets to increase throughput
- 📊 Detailed statistics for performance monitoring and optimization
- 🧠 Intelligent memory management to reduce memory allocation and GC pressure
- ⏹️ Support for cancellable operations suitable for long-running tasks
- 🛡️ Comprehensive error handling and security checks
## 🔧 Installation
```bash
go get -u github.com/landaiqing/go-xcipher
```
Ensure you are using Go 1.24 or higher (the version required by this module's go.mod).
## 📝 Usage Examples
### Simple Encryption/Decryption
```go
package main
import (
"fmt"
"github.com/landaiqing/go-xcipher"
"golang.org/x/crypto/chacha20poly1305"
)
func main() {
// Create a 32-byte key (this is just an example; in real applications, keys should be securely generated and stored)
key := make([]byte, chacha20poly1305.KeySize)
// Initialize the cipher
cipher := xcipher.NewXCipher(key)
// Data to encrypt
plaintext := []byte("sensitive data")
// Optional additional authenticated data
additionalData := []byte("header")
// Encrypt
ciphertext, err := cipher.Encrypt(plaintext, additionalData)
if err != nil {
panic(err)
}
// Decrypt
decrypted, err := cipher.Decrypt(ciphertext, additionalData)
if err != nil {
panic(err)
}
fmt.Println("Decrypted:", string(decrypted))
}
```
### Stream Encryption
```go
package main
import (
"fmt"
"os"
"github.com/landaiqing/go-xcipher"
"golang.org/x/crypto/chacha20poly1305"
)
func main() {
// Create a key
key := make([]byte, chacha20poly1305.KeySize)
// Initialize the cipher
cipher := xcipher.NewXCipher(key)
// Open the file to encrypt
inputFile, _ := os.Open("largefile.dat")
defer inputFile.Close()
// Create the output file
outputFile, _ := os.Create("largefile.encrypted")
defer outputFile.Close()
// Set stream options
options := xcipher.DefaultStreamOptions()
options.UseParallel = true // Enable parallel processing
options.BufferSize = 64 * 1024 // Set buffer size
options.CollectStats = true // Collect performance statistics
// Encrypt the stream
stats, err := cipher.EncryptStreamWithOptions(inputFile, outputFile, options)
if err != nil {
panic(err)
}
// Show performance statistics
fmt.Printf("Processing time: %v\n", stats.Duration())
fmt.Printf("Throughput: %.2f MB/s\n", stats.Throughput)
}
```
## 📋 API Documentation
### Core Types
```go
type XCipher struct {
// Fields unexported
}
// Statistics for stream processing
type StreamStats struct {
StartTime time.Time
EndTime time.Time
BytesProcessed int64
BlocksProcessed int
AvgBlockSize float64
Throughput float64
ParallelProcessing bool
WorkerCount int
BufferSize int
}
// Stream processing options
type StreamOptions struct {
BufferSize int
UseParallel bool
MaxWorkers int
AdditionalData []byte
CollectStats bool
CancelChan <-chan struct{}
}
```
### Main Functions and Methods
- `NewXCipher(key []byte) *XCipher` - Create a new cipher instance
- `(x *XCipher) Encrypt(data, additionalData []byte) ([]byte, error)` - Encrypt data
- `(x *XCipher) Decrypt(cipherData, additionalData []byte) ([]byte, error)` - Decrypt data
- `(x *XCipher) EncryptStream(reader io.Reader, writer io.Writer, additionalData []byte) error` - Encrypt a stream with default options
- `(x *XCipher) DecryptStream(reader io.Reader, writer io.Writer, additionalData []byte) error` - Decrypt a stream with default options
- `(x *XCipher) EncryptStreamWithOptions(reader io.Reader, writer io.Writer, options StreamOptions) (*StreamStats, error)` - Encrypt a stream with custom options
- `(x *XCipher) DecryptStreamWithOptions(reader io.Reader, writer io.Writer, options StreamOptions) (*StreamStats, error)` - Decrypt a stream with custom options
- `DefaultStreamOptions() StreamOptions` - Get default stream processing options
## 🚀 Performance
go-xcipher is optimized to handle data of various scales, from small messages to large files. Here are some benchmark results:
- Small data packet encryption: ~1.5 GB/s
- Large file parallel encryption: ~4.0 GB/s (depends on CPU cores and hardware)
- Memory efficiency: Memory usage remains low even when processing large files
## 🤝 Contributing
Issues and Pull Requests are welcome to help improve go-xcipher. You can contribute by:
1. Reporting bugs
2. Submitting feature requests
3. Submitting code improvements
4. Improving documentation
## 📜 License
go-xcipher is licensed under the Apache License 2.0 - see the [LICENSE](LICENSE) file for details.

188
README_CN.md Normal file
View File

@@ -0,0 +1,188 @@
# go-xcipher
<div align="center">
<img src="golang_logo.png" alt="go-xcipher Logo" height="150">
[![Go Reference](https://pkg.go.dev/badge/github.com/landaiqing/go-xcipher.svg)](https://pkg.go.dev/github.com/landaiqing/go-xcipher)
[![Go Report Card](https://goreportcard.com/badge/github.com/landaiqing/go-xcipher)](https://goreportcard.com/report/github.com/landaiqing/go-xcipher)
[![License](https://img.shields.io/github/license/landaiqing/go-xcipher.svg)](LICENSE)
[![Release](https://img.shields.io/github/release/landaiqing/go-xcipher.svg)](https://github.com/landaiqing/go-xcipher/releases/latest)
</div>
中文 | [English](README.md)
## 项目概述
go-xcipher 是一个高性能、易用的 Go 加密库,基于 ChaCha20-Poly1305 算法提供安全的数据加密和解密功能。该库特别优化了对大文件和数据流的处理,支持并行加密/解密,内存优化和可取消的操作。
## ✨ 特性
- 🔒 使用经过验证的 ChaCha20-Poly1305 算法提供高强度加密
- 🚀 针对大数据和流数据优化的性能
- 🧵 自动并行处理大数据集,提高吞吐量
- 📊 提供详细的统计信息,方便性能监控和优化
- 🧠 智能内存管理,减少内存分配和 GC 压力
- ⏹️ 支持可取消的操作,适合长时间运行的任务
- 🛡️ 全面的错误处理和安全检查
## 🔧 安装
```bash
go get -u github.com/landaiqing/go-xcipher
```
确保使用 Go 1.24 或更高版本（即本模块 go.mod 所要求的版本）。
## 📝 使用示例
### 简单加密/解密
```go
package main
import (
"fmt"
"github.com/landaiqing/go-xcipher"
"golang.org/x/crypto/chacha20poly1305"
)
func main() {
// 创建一个32字节的密钥这里只是示例实际应用中应安全生成和存储密钥
key := make([]byte, chacha20poly1305.KeySize)
// 初始化加密器
cipher := xcipher.NewXCipher(key)
// 要加密的数据
plaintext := []byte("敏感数据")
// 可选的附加验证数据
additionalData := []byte("header")
// 加密
ciphertext, err := cipher.Encrypt(plaintext, additionalData)
if err != nil {
panic(err)
}
// 解密
decrypted, err := cipher.Decrypt(ciphertext, additionalData)
if err != nil {
panic(err)
}
fmt.Println("解密后:", string(decrypted))
}
```
### 流式加密
```go
package main
import (
"fmt"
"os"
"github.com/landaiqing/go-xcipher"
"golang.org/x/crypto/chacha20poly1305"
)
func main() {
// 创建密钥
key := make([]byte, chacha20poly1305.KeySize)
// 初始化加密器
cipher := xcipher.NewXCipher(key)
// 打开要加密的文件
inputFile, _ := os.Open("大文件.dat")
defer inputFile.Close()
// 创建输出文件
outputFile, _ := os.Create("大文件.encrypted")
defer outputFile.Close()
// 设置流选项
options := xcipher.DefaultStreamOptions()
options.UseParallel = true // 启用并行处理
options.BufferSize = 64 * 1024 // 设置缓冲区大小
options.CollectStats = true // 收集性能统计
// 加密流
stats, err := cipher.EncryptStreamWithOptions(inputFile, outputFile, options)
if err != nil {
panic(err)
}
// 显示性能统计
fmt.Printf("处理用时: %v\n", stats.Duration())
fmt.Printf("处理速度: %.2f MB/s\n", stats.Throughput)
}
```
## 📋 API 文档
### 核心类型
```go
type XCipher struct {
// 内含字段未导出
}
// 流处理的统计信息
type StreamStats struct {
StartTime time.Time
EndTime time.Time
BytesProcessed int64
BlocksProcessed int
AvgBlockSize float64
Throughput float64
ParallelProcessing bool
WorkerCount int
BufferSize int
}
// 流处理选项
type StreamOptions struct {
BufferSize int
UseParallel bool
MaxWorkers int
AdditionalData []byte
CollectStats bool
CancelChan <-chan struct{}
}
```
### 主要函数和方法
- `NewXCipher(key []byte) *XCipher` - 创建新的加密器实例
- `(x *XCipher) Encrypt(data, additionalData []byte) ([]byte, error)` - 加密数据
- `(x *XCipher) Decrypt(cipherData, additionalData []byte) ([]byte, error)` - 解密数据
- `(x *XCipher) EncryptStream(reader io.Reader, writer io.Writer, additionalData []byte) error` - 使用默认选项加密流
- `(x *XCipher) DecryptStream(reader io.Reader, writer io.Writer, additionalData []byte) error` - 使用默认选项解密流
- `(x *XCipher) EncryptStreamWithOptions(reader io.Reader, writer io.Writer, options StreamOptions) (*StreamStats, error)` - 使用自定义选项加密流
- `(x *XCipher) DecryptStreamWithOptions(reader io.Reader, writer io.Writer, options StreamOptions) (*StreamStats, error)` - 使用自定义选项解密流
- `DefaultStreamOptions() StreamOptions` - 获取默认流处理选项
## 🚀 性能
go-xcipher 经过优化,可处理各种规模的数据,从小型消息到大型文件。以下是一些性能基准测试结果:
- 小数据包加密:~1.5 GB/s
- 大文件并行加密:~4.0 GB/s (取决于CPU核心数和硬件)
- 内存效率:即使处理大文件,内存使用量仍保持在较低水平
## 🤝 贡献
欢迎提交 Issues 和 Pull Requests 帮助改进 go-xcipher。您可以通过以下方式贡献
1. 报告 Bug
2. 提交功能请求
3. 提交代码改进
4. 完善文档
## 📜 许可证
go-xcipher 使用 Apache License 2.0 许可证 - 详见 [LICENSE](LICENSE) 文件。

7
go.mod Normal file
View File

@@ -0,0 +1,7 @@
module go-xcipher
go 1.24.0
require golang.org/x/crypto v0.36.0
require golang.org/x/sys v0.31.0 // indirect

4
go.sum Normal file
View File

@@ -0,0 +1,4 @@
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=

BIN
golang_logo.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 172 KiB

BIN
test.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.4 MiB

983
xcipher.go Normal file
View File

@@ -0,0 +1,983 @@
package xcipher
import (
"crypto/cipher"
"crypto/rand"
"encoding/binary"
"errors"
"fmt"
"io"
"log"
"runtime"
"sync"
"time"
"golang.org/x/crypto/chacha20poly1305"
)
// Package-wide tuning constants for buffering, pooling and parallelism.
const (
	nonceSize         = chacha20poly1305.NonceSizeX
	minCiphertextSize = nonceSize + 16  // 16 is the minimum size of Poly1305 authentication tag
	poolBufferSize    = 32 * 1024       // 32KB memory pool unit
	largeBufferSize   = 256 * 1024      // 256KB large buffer pool unit
	parallelThreshold = 1 * 1024 * 1024 // 1MB parallel processing threshold
	streamBufferSize  = 64 * 1024       // 64KB stream processing buffer size
	minWorkers        = 2               // Minimum number of parallel workers
	maxWorkers        = 8               // Maximum number of parallel workers (increased from 4)
	minBufferSize     = 8 * 1024        // Minimum buffer size (8KB)
	maxBufferSize     = 1024 * 1024     // Maximum buffer size (1MB)
	optimalBlockSize  = 64 * 1024       // 64KB is typically optimal for ChaCha20-Poly1305
	batchSize         = 8               // Batch queue size used by parallel processing
)
// Sentinel errors for consistent error handling. All messages carry the
// "xcipher:" prefix; callers should compare with errors.Is since several
// are wrapped with additional context via fmt.Errorf("%w: ...").
var (
	ErrInvalidKeySize       = errors.New("xcipher: invalid key size")
	ErrCiphertextShort      = errors.New("xcipher: ciphertext too short")
	ErrNonceGeneration      = errors.New("xcipher: nonce generation failed")
	ErrEmptyPlaintext       = errors.New("xcipher: empty plaintext")
	ErrAuthenticationFailed = errors.New("xcipher: authentication failed")
	ErrReadFailed           = errors.New("xcipher: read from input stream failed")
	ErrWriteFailed          = errors.New("xcipher: write to output stream failed")
	ErrBufferSizeTooSmall   = errors.New("xcipher: buffer size too small")
	ErrBufferSizeTooLarge   = errors.New("xcipher: buffer size too large")
	ErrOperationCancelled   = errors.New("xcipher: operation was cancelled")
)
// Global memory pool to reduce small object allocations.
// New entries always have capacity poolBufferSize and zero length.
var bufferPool = &sync.Pool{
	New: func() interface{} {
		return make([]byte, 0, poolBufferSize)
	},
}

// Global memory pool for large buffers used in parallel processing.
// New entries always have capacity largeBufferSize and zero length.
var largeBufferPool = &sync.Pool{
	New: func() interface{} {
		return make([]byte, 0, largeBufferSize)
	},
}
// getBuffer returns a byte slice with length exactly capacity, preferring a
// pooled buffer over a fresh heap allocation whenever one is large enough.
func getBuffer(capacity int) []byte {
	if capacity <= poolBufferSize {
		// Small request: try the regular pool first.
		if b := bufferPool.Get().([]byte); cap(b) >= capacity {
			return b[:capacity]
		} else {
			bufferPool.Put(b[:0]) // pooled buffer too small — hand it back
		}
	} else if capacity <= largeBufferSize {
		// Medium request: try the large-buffer pool.
		if b := largeBufferPool.Get().([]byte); cap(b) >= capacity {
			return b[:capacity]
		} else {
			largeBufferPool.Put(b[:0]) // pooled buffer too small — hand it back
		}
	}
	// No suitable pooled buffer was available: allocate a new one.
	return make([]byte, capacity)
}
// putBuffer recycles buf into the pool matching its capacity. Nil slices are
// ignored, and buffers larger than largeBufferSize are deliberately dropped
// so the garbage collector can reclaim them.
func putBuffer(buf []byte) {
	if buf == nil {
		return
	}
	switch c := cap(buf); {
	case c <= poolBufferSize:
		bufferPool.Put(buf[:0])
	case c <= largeBufferSize:
		largeBufferPool.Put(buf[:0])
	default:
		// Oversized buffer: not pooled.
	}
}
// XCipher wraps an XChaCha20-Poly1305 AEAD instance together with its
// cached authentication-tag overhead.
type XCipher struct {
	aead     cipher.AEAD
	overhead int // Cache overhead to reduce repeated aead.Overhead() calls
}
// NewXCipher constructs an XCipher from a 32-byte XChaCha20-Poly1305 key.
// It panics (via log.Panic) on a wrong-sized key or AEAD construction
// failure, preserving the original fail-fast contract.
//
// Note: the previous version had unreachable `return nil` statements after
// each log.Panic (which never returns); they are removed as dead code.
func NewXCipher(key []byte) *XCipher {
	if len(key) != chacha20poly1305.KeySize {
		log.Panic(fmt.Errorf("%w: expected %d bytes, got %d",
			ErrInvalidKeySize, chacha20poly1305.KeySize, len(key)))
	}
	aead, err := chacha20poly1305.NewX(key)
	if err != nil {
		log.Panic(fmt.Errorf("xcipher: create aead failed: %w", err))
	}
	return &XCipher{
		aead:     aead,
		overhead: aead.Overhead(),
	}
}
// Encrypt seals data with XChaCha20-Poly1305 and returns a single slice laid
// out as nonce || ciphertext || tag. additionalData is authenticated but not
// encrypted and may be nil. Payloads larger than parallelThreshold bypass the
// buffer pool and use one direct allocation (see encryptDirect).
//
// BUG FIX: the previous implementation recycled buf through a deferred
// putBuffer guarded by len(buf) == nonceSize. After a successful Seal, buf
// still has length nonceSize (Seal returns a new slice header), so the
// condition was true and the backing array of the RETURNED ciphertext was
// handed back to the pool — later pool users could overwrite the caller's
// data. The buffer is now recycled only on the error path, where nothing
// aliases it.
func (x *XCipher) Encrypt(data, additionalData []byte) ([]byte, error) {
	if len(data) == 0 {
		return nil, ErrEmptyPlaintext
	}
	// Above the threshold a pooled buffer would not fit anyway; allocate
	// the result directly.
	if len(data) > parallelThreshold {
		return x.encryptDirect(data, additionalData)
	}
	requiredCapacity := nonceSize + len(data) + x.overhead
	// Start with a pooled buffer holding just the nonce.
	buf := getBuffer(nonceSize)
	// Generate the random 24-byte nonce in place.
	if _, err := rand.Read(buf); err != nil {
		putBuffer(buf) // error path: nothing aliases buf, safe to recycle
		return nil, ErrNonceGeneration
	}
	// Grow the buffer if the pooled one cannot hold nonce+ciphertext+tag.
	if cap(buf) < requiredCapacity {
		oldBuf := buf
		buf = make([]byte, nonceSize, requiredCapacity)
		copy(buf, oldBuf)
		putBuffer(oldBuf) // the old pooled buffer is no longer referenced
	}
	// Seal appends ciphertext+tag after the nonce. The returned slice aliases
	// buf's backing array, so ownership of that memory passes to the caller
	// and buf must NOT be returned to the pool here.
	result := x.aead.Seal(buf, buf[:nonceSize], data, additionalData)
	return result, nil
}
// encryptDirect seals data into one directly allocated buffer, bypassing the
// memory pools. The result has the same nonce || ciphertext || tag layout as
// Encrypt. Used for large payloads where pooling offers no benefit.
func (x *XCipher) encryptDirect(data, additionalData []byte) ([]byte, error) {
	// One allocation sized for nonce + ciphertext + authentication tag.
	out := make([]byte, nonceSize+len(data)+x.overhead)
	// Fill the leading nonceSize bytes with a fresh random nonce.
	if _, err := rand.Read(out[:nonceSize]); err != nil {
		return nil, ErrNonceGeneration
	}
	// Seal writes ciphertext+tag immediately after the nonce, in place.
	x.aead.Seal(out[nonceSize:nonceSize], out[:nonceSize], data, additionalData)
	return out, nil
}
// Decrypt verifies and decrypts cipherData (expected layout:
// nonce || ciphertext || tag, as produced by Encrypt). additionalData must
// match the value supplied at encryption time.
func (x *XCipher) Decrypt(cipherData, additionalData []byte) ([]byte, error) {
	if len(cipherData) < minCiphertextSize {
		return nil, ErrCiphertextShort
	}
	nonce, body := cipherData[:nonceSize], cipherData[nonceSize:]
	// Estimate the plaintext size; reject inputs too short to hold a tag.
	plainLen := len(body) - x.overhead
	if plainLen <= 0 {
		return nil, ErrCiphertextShort
	}
	if plainLen > largeBufferSize {
		// Large payload: let Open allocate the exact-size result itself.
		return x.aead.Open(nil, nonce, body, additionalData)
	}
	// Small payload: decrypt into a fresh buffer rather than in place over
	// the input — Open requires dst and the ciphertext not to overlap.
	dst := make([]byte, 0, plainLen)
	plain, err := x.aead.Open(dst, nonce, body, additionalData)
	if err != nil {
		return nil, ErrAuthenticationFailed
	}
	return plain, nil
}
// StreamStats contains statistics collected during stream
// encryption/decryption when StreamOptions.CollectStats is enabled.
type StreamStats struct {
	// Start time of the operation
	StartTime time.Time
	// End time of the operation
	EndTime time.Time
	// Total processed bytes (plaintext side)
	BytesProcessed int64
	// Number of blocks processed
	BlocksProcessed int
	// Average block size in bytes (BytesProcessed / BlocksProcessed)
	AvgBlockSize float64
	// Processing speed (MB/s)
	Throughput float64
	// Whether parallel processing was used
	ParallelProcessing bool
	// Number of worker threads
	WorkerCount int
	// Buffer size in bytes
	BufferSize int
}
// Duration reports the wall-clock time the operation took, measured from
// StartTime to EndTime.
func (s *StreamStats) Duration() time.Duration {
	start, end := s.StartTime, s.EndTime
	return end.Sub(start)
}
// StreamOptions configures stream encryption/decryption behaviour.
type StreamOptions struct {
	// Buffer size in bytes; clamped to [minBufferSize, maxBufferSize]
	BufferSize int
	// Whether to use parallel processing
	UseParallel bool
	// Maximum number of worker threads (parallel mode only)
	MaxWorkers int
	// Additional authenticated data (not encrypted, but authenticated)
	AdditionalData []byte
	// Whether to collect StreamStats during processing
	CollectStats bool
	// Optional cancel signal; closing it aborts with ErrOperationCancelled
	CancelChan <-chan struct{}
}
// DefaultStreamOptions returns the baseline stream configuration: a 64KB
// buffer, serial processing, maxWorkers as the worker ceiling, no additional
// authenticated data, no statistics collection and no cancellation channel.
func DefaultStreamOptions() StreamOptions {
	var opts StreamOptions
	opts.BufferSize = streamBufferSize
	opts.MaxWorkers = maxWorkers
	// UseParallel, AdditionalData, CollectStats and CancelChan keep their
	// zero values (false / nil), matching the documented defaults.
	return opts
}
// EncryptStreamWithOptions encrypts everything read from reader and writes it
// to writer, honouring the supplied StreamOptions.
//
// Serial wire format: a random 24-byte nonce is written first, followed by
// one sealed record (ciphertext + tag) per plaintext block read. Before each
// block is sealed, the first 8 bytes of the nonce are overwritten with a
// little-endian block counter, so every block uses a distinct nonce while the
// remaining 16 bytes keep their random value. The matching decrypt routine
// must reproduce the same counter scheme.
//
// stats is non-nil only when options.CollectStats is true.
func (x *XCipher) EncryptStreamWithOptions(reader io.Reader, writer io.Writer, options StreamOptions) (stats *StreamStats, err error) {
	// Auto-detect whether parallel processing should be used: a very large
	// buffer suggests bulk data even if the caller did not request it.
	if options.UseParallel == false && options.BufferSize >= parallelThreshold/2 {
		// Buffer is large but parallel mode is off — enable it automatically.
		options.UseParallel = true
		if options.MaxWorkers <= 0 {
			options.MaxWorkers = calculateOptimalWorkers(options.BufferSize, maxWorkers)
		}
	}
	// Initialize statistics collection; derived values (throughput, average
	// block size) are finalised in the deferred closure on any return path.
	if options.CollectStats {
		stats = &StreamStats{
			StartTime:          time.Now(),
			ParallelProcessing: options.UseParallel,
			WorkerCount:        options.MaxWorkers,
			BufferSize:         options.BufferSize,
		}
		defer func() {
			stats.EndTime = time.Now()
			if stats.BytesProcessed > 0 {
				durationSec := stats.Duration().Seconds()
				if durationSec > 0 {
					stats.Throughput = float64(stats.BytesProcessed) / durationSec / 1e6 // MB/s
				}
				if stats.BlocksProcessed > 0 {
					stats.AvgBlockSize = float64(stats.BytesProcessed) / float64(stats.BlocksProcessed)
				}
			}
		}()
	}
	// Validate and adjust options: default, then clamp to the allowed range.
	if options.BufferSize <= 0 {
		options.BufferSize = streamBufferSize
	} else if options.BufferSize < minBufferSize {
		return stats, fmt.Errorf("%w: %d is less than minimum %d",
			ErrBufferSizeTooSmall, options.BufferSize, minBufferSize)
	} else if options.BufferSize > maxBufferSize {
		return stats, fmt.Errorf("%w: %d is greater than maximum %d",
			ErrBufferSizeTooLarge, options.BufferSize, maxBufferSize)
	}
	if options.UseParallel {
		if options.MaxWorkers <= 0 {
			options.MaxWorkers = maxWorkers
		} else if options.MaxWorkers > runtime.NumCPU()*2 {
			log.Printf("Warning: Number of worker threads %d exceeds twice the number of CPU cores (%d)",
				options.MaxWorkers, runtime.NumCPU()*2)
		}
		// Delegate to the parallel implementation.
		return x.encryptStreamParallelWithOptions(reader, writer, options, stats)
	}
	// Serial path. Generate the random base nonce.
	nonce := make([]byte, nonceSize)
	if _, err := rand.Read(nonce); err != nil {
		return stats, fmt.Errorf("%w: %v", ErrNonceGeneration, err)
	}
	// The nonce is written first so the decryptor can recover it.
	if _, err := writer.Write(nonce); err != nil {
		return stats, fmt.Errorf("%w: %v", ErrWriteFailed, err)
	}
	// Get the read buffer from the memory pool, or create a new one.
	var buffer []byte
	var sealed []byte
	// Check whether the pooled buffer is large enough for BufferSize.
	bufFromPool := bufferPool.Get().([]byte)
	if cap(bufFromPool) >= options.BufferSize {
		buffer = bufFromPool[:options.BufferSize]
	} else {
		bufferPool.Put(bufFromPool[:0]) // Return buffer that's not large enough
		buffer = make([]byte, options.BufferSize)
	}
	// NOTE(review): if buffer was freshly allocated above, its capacity may
	// exceed poolBufferSize, yet it is put into the small-buffer pool here;
	// routing through putBuffer would respect the pool size classes.
	defer bufferPool.Put(buffer[:0])
	// Reusable destination for each sealed block (ciphertext + tag).
	sealed = make([]byte, 0, options.BufferSize+x.overhead)
	// Per-block counter folded into the nonce; see the format note above.
	var counter uint64 = 0
	var bytesProcessed int64 = 0
	var blocksProcessed = 0
	for {
		// Honour the cancellation channel between blocks (non-blocking poll).
		if options.CancelChan != nil {
			select {
			case <-options.CancelChan:
				return stats, ErrOperationCancelled
			default:
				// Continue processing
			}
		}
		// Read the next plaintext block. Read may return n > 0 together with
		// io.EOF, so the data is processed before the EOF check below.
		n, err := reader.Read(buffer)
		if err != nil && err != io.EOF {
			return stats, fmt.Errorf("%w: %v", ErrReadFailed, err)
		}
		if n > 0 {
			// Update statistics counters.
			bytesProcessed += int64(n)
			blocksProcessed++
			// Overwrite the first 8 nonce bytes with the block counter.
			binary.LittleEndian.PutUint64(nonce, counter)
			counter++
			// Seal this block into the reusable buffer.
			encrypted := x.aead.Seal(sealed[:0], nonce, buffer[:n], options.AdditionalData)
			// Write the sealed record to the output stream.
			if _, err := writer.Write(encrypted); err != nil {
				return stats, fmt.Errorf("%w: %v", ErrWriteFailed, err)
			}
		}
		if err == io.EOF {
			break
		}
	}
	// Publish final counters into the stats struct (if collecting).
	if stats != nil {
		stats.BytesProcessed = bytesProcessed
		stats.BlocksProcessed = blocksProcessed
	}
	return stats, nil
}
// encryptStreamParallelWithOptions is the parallel implementation backing
// EncryptStreamWithOptions.
//
// Output format: a 24-byte random base nonce, followed by a sequence of
// [4-byte little-endian length][ciphertext] blocks; each block is sealed with
// a nonce whose first 8 bytes hold the block index.
//
// Fixes relative to the original:
//   - On cancellation or a read error the jobs channel is now closed and the
//     worker/writer goroutines are drained before returning; previously every
//     goroutine (and its pooled buffers) leaked because jobs was never closed.
//   - The cancel branch inside the batch dispatch no longer double-frees
//     buffers that were already handed to workers — only the unsent tail of
//     the batch is returned to the pool.
//
// NOTE(review): if the writer goroutine exits early on a write error, workers
// blocked on the results channel can stall the final wg.Wait; this pre-existing
// hazard is unchanged here and needs a broader restructuring to fix.
func (x *XCipher) encryptStreamParallelWithOptions(reader io.Reader, writer io.Writer, options StreamOptions, stats *StreamStats) (*StreamStats, error) {
	// Generate random base nonce
	baseNonce := make([]byte, nonceSize)
	if _, err := rand.Read(baseNonce); err != nil {
		return stats, ErrNonceGeneration
	}
	// Write base nonce first
	if _, err := writer.Write(baseNonce); err != nil {
		return stats, fmt.Errorf("%w: %v", ErrWriteFailed, err)
	}
	// Bound the worker count by CPU count and the configured maximum.
	workers := runtime.NumCPU()
	if workers > options.MaxWorkers {
		workers = options.MaxWorkers
	}
	// Oversize the job queue to reduce channel contention.
	workerQueueSize := workers * 4
	// Create worker pool
	jobs := make(chan job, workerQueueSize)
	results := make(chan result, workerQueueSize)
	errorsChannel := make(chan error, 1)
	var wg sync.WaitGroup
	// Totals accumulated by the writer goroutine; published to stats at the end.
	var bytesProcessed int64 = 0
	var blocksProcessed = 0
	// Start worker threads
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Per-worker scratch buffer, reused across blocks to avoid
			// an allocation per Seal call.
			encBuf := make([]byte, 0, options.BufferSize+x.overhead)
			for job := range jobs {
				// Derive this block's nonce: base nonce with the first
				// 8 bytes replaced by the block id.
				blockNonce := make([]byte, nonceSize)
				copy(blockNonce, baseNonce)
				binary.LittleEndian.PutUint64(blockNonce, job.id)
				encrypted := x.aead.Seal(encBuf[:0], blockNonce, job.data, options.AdditionalData)
				// Copy into a pooled buffer so the scratch buffer can be
				// reused by the next block.
				resultData := getBuffer(len(encrypted))
				copy(resultData, encrypted)
				results <- result{
					id:   job.id,
					data: resultData,
				}
				// Return the input buffer to the pool once processed.
				putBuffer(job.data)
			}
		}()
	}
	// Start the result-collection/writing goroutine; it writes blocks in id
	// order, buffering out-of-order arrivals in pendingResults.
	resultsDone := make(chan struct{})
	go func() {
		pendingResults := make(map[uint64][]byte)
		nextID := uint64(0)
		for r := range results {
			pendingResults[r.id] = r.data
			for {
				if data, ok := pendingResults[nextID]; ok {
					// Write the block length prefix.
					sizeBytes := make([]byte, 4)
					binary.LittleEndian.PutUint32(sizeBytes, uint32(len(data)))
					if _, err := writer.Write(sizeBytes); err != nil {
						errorsChannel <- fmt.Errorf("%w: %v", ErrWriteFailed, err)
						return
					}
					// Write the ciphertext itself.
					if _, err := writer.Write(data); err != nil {
						errorsChannel <- fmt.Errorf("%w: %v", ErrWriteFailed, err)
						return
					}
					// Update running totals for statistics.
					if stats != nil {
						bytesProcessed += int64(len(data))
						blocksProcessed++
					}
					// Return the buffer to the pool.
					putBuffer(data)
					delete(pendingResults, nextID)
					nextID++
				} else {
					break
				}
			}
		}
		close(resultsDone) // Signal that result processing is complete
	}()
	// shutdown closes the pipeline and waits for every goroutine to finish.
	// Call exactly once, on every exit path, so nothing leaks.
	shutdown := func() {
		close(jobs)
		wg.Wait()
		close(results)
		<-resultsDone
	}
	// Read input, batch it, and dispatch work.
	buffer := getBuffer(options.BufferSize)
	defer putBuffer(buffer)
	var jobID uint64 = 0
	// Batch job submission to reduce channel contention.
	const batchSize = 16
	dataBatch := make([][]byte, 0, batchSize)
	idBatch := make([]uint64, 0, batchSize)
	for {
		// Check cancel signal
		if options.CancelChan != nil {
			select {
			case <-options.CancelChan:
				// Free any not-yet-dispatched buffers, then drain the
				// pipeline so no goroutine leaks (fix).
				for _, d := range dataBatch {
					putBuffer(d)
				}
				shutdown()
				return stats, ErrOperationCancelled
			default:
				// Continue processing
			}
		}
		// Read plaintext data
		n, err := reader.Read(buffer)
		if err != nil && err != io.EOF {
			// Drain the pipeline on read failure as well (fix).
			for _, d := range dataBatch {
				putBuffer(d)
			}
			shutdown()
			return stats, fmt.Errorf("%w: %v", ErrReadFailed, err)
		}
		if n > 0 {
			// Copy the chunk so the shared read buffer can be reused.
			data := getBuffer(n)
			copy(data, buffer[:n])
			dataBatch = append(dataBatch, data)
			idBatch = append(idBatch, jobID)
			jobID++
			// Dispatch once the batch is full or the input is exhausted.
			if len(dataBatch) >= batchSize || err == io.EOF {
				for i := range dataBatch {
					select {
					case jobs <- job{
						id:   idBatch[i],
						data: dataBatch[i],
					}:
					case <-options.CancelChan:
						// Free only the unsent tail — entries already sent
						// belong to the workers now (double-free fix) — then
						// drain the pipeline.
						for _, d := range dataBatch[i:] {
							putBuffer(d)
						}
						shutdown()
						return stats, ErrOperationCancelled
					}
				}
				// Reset the batch.
				dataBatch = dataBatch[:0]
				idBatch = idBatch[:0]
			}
		}
		if err == io.EOF {
			break
		}
	}
	// Send any remaining batched jobs.
	for i := range dataBatch {
		jobs <- job{
			id:   idBatch[i],
			data: dataBatch[i],
		}
	}
	// Normal completion: close the pipeline and wait for the writer to finish.
	shutdown()
	// Publish totals (happens-after the writer goroutine via resultsDone).
	if stats != nil {
		stats.BytesProcessed = bytesProcessed
		stats.BlocksProcessed = blocksProcessed
	}
	// Check for errors
	select {
	case err := <-errorsChannel:
		return stats, err
	default:
		return stats, nil
	}
}
// DecryptStreamWithOptions performs stream decryption with configuration options.
//
// The serial path expects the serial encryptor's format: a 24-byte nonce
// followed by raw ciphertext blocks, with the nonce's first 8 bytes replaced
// by a little-endian block counter for each block.
//
// Fixes relative to the original:
//   - CollectStats is now honored on the serial path (it previously always
//     returned nil statistics regardless of the option).
//   - CancelChan is polled between blocks, matching the encryption side.
func (x *XCipher) DecryptStreamWithOptions(reader io.Reader, writer io.Writer, options StreamOptions) (stats *StreamStats, err error) {
	// Auto-detect whether parallel processing should be used.
	if options.UseParallel == false && options.BufferSize >= parallelThreshold/2 {
		// Large buffer but parallelism not requested — enable it automatically.
		options.UseParallel = true
		if options.MaxWorkers <= 0 {
			options.MaxWorkers = calculateOptimalWorkers(options.BufferSize, maxWorkers)
		}
	}
	// Validate and adjust options, similar to encryption (out-of-range sizes
	// are clamped here rather than rejected).
	if options.BufferSize <= 0 {
		options.BufferSize = streamBufferSize
	} else if options.BufferSize < minBufferSize {
		options.BufferSize = minBufferSize
	} else if options.BufferSize > maxBufferSize {
		options.BufferSize = maxBufferSize
	}
	if options.UseParallel {
		if options.MaxWorkers <= 0 {
			options.MaxWorkers = maxWorkers
		}
		// Use parallel implementation (it collects its own statistics).
		return x.decryptStreamParallelWithOptions(reader, writer, options)
	}
	// Initialize statistics for the serial path when requested (fix: this
	// was previously ignored and the function always returned nil stats).
	if options.CollectStats {
		stats = &StreamStats{
			StartTime:          time.Now(),
			ParallelProcessing: false,
			WorkerCount:        1,
			BufferSize:         options.BufferSize,
		}
		// Finalize derived metrics (throughput, average block size) on return.
		defer func() {
			stats.EndTime = time.Now()
			if stats.BytesProcessed > 0 {
				durationSec := stats.Duration().Seconds()
				if durationSec > 0 {
					stats.Throughput = float64(stats.BytesProcessed) / durationSec / 1e6 // MB/s
				}
				if stats.BlocksProcessed > 0 {
					stats.AvgBlockSize = float64(stats.BytesProcessed) / float64(stats.BlocksProcessed)
				}
			}
		}()
	}
	// Read nonce
	nonce := make([]byte, nonceSize)
	if _, err := io.ReadFull(reader, nonce); err != nil {
		return stats, fmt.Errorf("%w: failed to read nonce: %v", ErrReadFailed, err)
	}
	// Get a ciphertext buffer from the memory pool, or allocate a new one
	// when the pooled buffer is too small for BufferSize + AEAD overhead.
	var encBuffer []byte
	bufFromPool := bufferPool.Get().([]byte)
	if cap(bufFromPool) >= options.BufferSize+x.overhead {
		encBuffer = bufFromPool[:options.BufferSize+x.overhead]
	} else {
		bufferPool.Put(bufFromPool[:0]) // Return buffer that's not large enough
		encBuffer = make([]byte, options.BufferSize+x.overhead)
	}
	defer bufferPool.Put(encBuffer[:0])
	// Reusable plaintext buffer.
	decBuffer := make([]byte, 0, options.BufferSize)
	// The block counter must mirror the encryption side exactly.
	var counter uint64 = 0
	for {
		// Honor cancellation between blocks (fix: previously never checked).
		if options.CancelChan != nil {
			select {
			case <-options.CancelChan:
				return stats, ErrOperationCancelled
			default:
				// Continue processing
			}
		}
		// Read one encrypted block.
		n, err := reader.Read(encBuffer)
		if err != nil && err != io.EOF {
			return stats, fmt.Errorf("%w: %v", ErrReadFailed, err)
		}
		if n > 0 {
			// Re-derive this block's nonce from the counter.
			binary.LittleEndian.PutUint64(nonce, counter)
			counter++
			// Decrypt and authenticate the block.
			decrypted, openErr := x.aead.Open(decBuffer[:0], nonce, encBuffer[:n], options.AdditionalData)
			if openErr != nil {
				return stats, ErrAuthenticationFailed
			}
			// Write decrypted data
			if _, werr := writer.Write(decrypted); werr != nil {
				return stats, fmt.Errorf("%w: %v", ErrWriteFailed, werr)
			}
			if stats != nil {
				stats.BytesProcessed += int64(len(decrypted))
				stats.BlocksProcessed++
			}
		}
		if err == io.EOF {
			break
		}
	}
	return stats, nil
}
// decryptStreamParallelWithOptions is the parallel implementation backing
// DecryptStreamWithOptions. It expects the format produced by
// encryptStreamParallelWithOptions: a 24-byte base nonce followed by
// [4-byte little-endian length][ciphertext] blocks.
//
// NOTE(review): blockSize is taken straight off the wire and passed to
// getBuffer(int(blockSize)) without an upper bound — a corrupted or malicious
// stream can force a very large allocation; consider validating against
// maxBufferSize + overhead.
// NOTE(review): on authentication failure a worker skips the block and
// continues, so output after the first bad block is silently truncated while
// the error surfaces only at the end.
// NOTE(review): the mid-loop read-error returns do not close the jobs channel,
// leaking the worker goroutines on that path.
func (x *XCipher) decryptStreamParallelWithOptions(reader io.Reader, writer io.Writer, options StreamOptions) (*StreamStats, error) {
	// Initialize statistics
	var stats *StreamStats
	if options.CollectStats {
		stats = &StreamStats{
			StartTime:          time.Now(),
			ParallelProcessing: true,
			WorkerCount:        options.MaxWorkers,
			BufferSize:         options.BufferSize,
		}
		// Finalize derived metrics (throughput, average block size) on return.
		defer func() {
			stats.EndTime = time.Now()
			if stats.BytesProcessed > 0 {
				durationSec := stats.Duration().Seconds()
				if durationSec > 0 {
					stats.Throughput = float64(stats.BytesProcessed) / durationSec / 1e6 // MB/s
				}
				if stats.BlocksProcessed > 0 {
					stats.AvgBlockSize = float64(stats.BytesProcessed) / float64(stats.BlocksProcessed)
				}
			}
		}()
	}
	// Read base nonce
	baseNonce := make([]byte, nonceSize)
	if _, err := io.ReadFull(reader, baseNonce); err != nil {
		return stats, fmt.Errorf("%w: failed to read nonce: %v", ErrReadFailed, err)
	}
	// Set the number of worker threads using the tuned heuristic.
	workers := calculateOptimalWorkers(options.BufferSize, options.MaxWorkers)
	// Oversize the job queue to reduce channel contention.
	workerQueueSize := workers * 4
	// Create worker pool
	jobs := make(chan job, workerQueueSize)
	results := make(chan result, workerQueueSize)
	errorsChannel := make(chan error, 1)
	var wg sync.WaitGroup
	// Start worker threads
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Per-worker plaintext scratch buffer, reused for every block.
			decBuf := make([]byte, 0, options.BufferSize)
			for job := range jobs {
				// Re-derive this block's nonce: base nonce with the first
				// 8 bytes replaced by the block id.
				blockNonce := make([]byte, nonceSize)
				copy(blockNonce, baseNonce)
				binary.LittleEndian.PutUint64(blockNonce, job.id)
				// Decrypt data block
				decrypted, err := x.aead.Open(decBuf[:0], blockNonce, job.data, options.AdditionalData)
				if err != nil {
					select {
					case errorsChannel <- ErrAuthenticationFailed:
					default:
						// If an error is already sent, don't send another one
					}
					putBuffer(job.data) // release the input buffer
					continue // Continue processing other blocks instead of returning immediately
				}
				// Copy into a pooled buffer so the scratch buffer can be
				// reused by the next block.
				resultData := getBuffer(len(decrypted))
				copy(resultData, decrypted)
				// Send result
				results <- result{
					id:   job.id,
					data: resultData,
				}
				// Release the input buffer.
				putBuffer(job.data)
			}
		}()
	}
	// Start the result-collection/writing goroutine; it writes blocks in id
	// order, buffering out-of-order arrivals in pendingResults.
	resultsDone := make(chan struct{})
	go func() {
		pendingResults := make(map[uint64][]byte)
		nextID := uint64(0)
		for r := range results {
			pendingResults[r.id] = r.data
			// Write results in order
			for {
				if data, ok := pendingResults[nextID]; ok {
					if _, err := writer.Write(data); err != nil {
						errorsChannel <- fmt.Errorf("%w: %v", ErrWriteFailed, err)
						return
					}
					if stats != nil {
						stats.BytesProcessed += int64(len(data))
						stats.BlocksProcessed++
					}
					// Return the buffer to the pool.
					putBuffer(data)
					delete(pendingResults, nextID)
					nextID++
				} else {
					break
				}
			}
		}
		close(resultsDone)
	}()
	// Read and assign work
	sizeBytes := make([]byte, 4)
	var jobID uint64 = 0
	// Batch job submission to reduce channel contention.
	dataBatch := make([][]byte, 0, batchSize)
	idBatch := make([]uint64, 0, batchSize)
	for {
		// Check cancel signal
		if options.CancelChan != nil {
			select {
			case <-options.CancelChan:
				// Gracefully drain the pipeline before returning.
				close(jobs)
				wg.Wait()
				close(results)
				<-resultsDone
				return stats, ErrOperationCancelled
			default:
				// Continue processing
			}
		}
		// Read block size
		_, err := io.ReadFull(reader, sizeBytes)
		if err != nil {
			if err == io.EOF {
				break
			}
			return stats, fmt.Errorf("%w: %v", ErrReadFailed, err)
		}
		blockSize := binary.LittleEndian.Uint32(sizeBytes)
		encryptedBlock := getBuffer(int(blockSize))
		// Read encrypted data block
		_, err = io.ReadFull(reader, encryptedBlock)
		if err != nil {
			putBuffer(encryptedBlock) // release the buffer
			return stats, fmt.Errorf("%w: %v", ErrReadFailed, err)
		}
		// Add the block to the current batch.
		dataBatch = append(dataBatch, encryptedBlock)
		idBatch = append(idBatch, jobID)
		jobID++
		// Dispatch once the batch is full.
		if len(dataBatch) >= batchSize {
			for i := range dataBatch {
				select {
				case jobs <- job{
					id:   idBatch[i],
					data: dataBatch[i],
				}:
				case <-options.CancelChan:
					// Cancelled: return the batch buffers to the pool.
					for _, d := range dataBatch {
						putBuffer(d)
					}
					return stats, ErrOperationCancelled
				}
			}
			// Reset the batch.
			dataBatch = dataBatch[:0]
			idBatch = idBatch[:0]
		}
	}
	// Send any remaining batched jobs.
	for i := range dataBatch {
		jobs <- job{
			id:   idBatch[i],
			data: dataBatch[i],
		}
	}
	// Close jobs channel and wait for all workers to complete
	close(jobs)
	wg.Wait()
	// Close results channel after all workers are done
	close(results)
	// Wait for result processing to complete
	<-resultsDone
	// Check for errors
	select {
	case err := <-errorsChannel:
		return stats, err
	default:
		return stats, nil
	}
}
// EncryptStream performs stream encryption with the default options,
// attaching the given additional authenticated data. Statistics are
// discarded; use EncryptStreamWithOptions to collect them.
func (x *XCipher) EncryptStream(reader io.Reader, writer io.Writer, additionalData []byte) error {
	opts := DefaultStreamOptions()
	opts.AdditionalData = additionalData
	if _, err := x.EncryptStreamWithOptions(reader, writer, opts); err != nil {
		return err
	}
	return nil
}
// DecryptStream performs stream decryption with the default options,
// verifying the given additional authenticated data. Statistics are
// discarded; use DecryptStreamWithOptions to collect them.
func (x *XCipher) DecryptStream(reader io.Reader, writer io.Writer, additionalData []byte) error {
	opts := DefaultStreamOptions()
	opts.AdditionalData = additionalData
	if _, err := x.DecryptStreamWithOptions(reader, writer, opts); err != nil {
		return err
	}
	return nil
}
// job is a unit of work handed to a parallel worker: the block's sequence id
// (used both to derive the per-block nonce and to reorder output) and a
// pooled buffer holding the block's input bytes. Ownership of data passes to
// the worker, which returns it to the pool when done.
type job struct {
	id   uint64
	data []byte
}

// result carries a processed block back to the writer goroutine; id matches
// the originating job so blocks can be written in order. data is a pooled
// buffer released by the writer after it is flushed.
type result struct {
	id   uint64
	data []byte
}
// calculateOptimalWorkers chooses a worker-thread count for the given data
// size: half the CPU cores (floored at minWorkers) for payloads under 4MB,
// all cores for larger payloads, never exceeding maxWorkers.
func calculateOptimalWorkers(dataSize int, maxWorkers int) int {
	const smallDataThreshold = 4 * 1024 * 1024 // 4MB
	cpuCount := runtime.NumCPU()
	if dataSize < smallDataThreshold {
		// Small payloads: fewer workers to limit scheduling overhead.
		w := cpuCount / 2
		if w < minWorkers {
			return minWorkers
		}
		if w > maxWorkers {
			return maxWorkers
		}
		return w
	}
	// Large payloads: use every core, capped at the configured maximum.
	if cpuCount > maxWorkers {
		return maxWorkers
	}
	return cpuCount
}
// calculateOptimalBufferSize clamps a user-supplied buffer size to the
// [minBufferSize, maxBufferSize] range, falling back to optimalBlockSize
// when no size was specified.
func calculateOptimalBufferSize(options StreamOptions) int {
	size := options.BufferSize
	if size <= 0 {
		// Nothing specified: use the tuned default block size.
		return optimalBlockSize
	}
	switch {
	case size < minBufferSize:
		return minBufferSize
	case size > maxBufferSize:
		return maxBufferSize
	default:
		return size
	}
}

613
xcipher_bench_test.go Normal file
View File

@@ -0,0 +1,613 @@
package xcipher
import (
"bytes"
"crypto/rand"
"fmt"
"golang.org/x/crypto/chacha20poly1305"
"io"
"io/ioutil"
"os"
"testing"
)
// genRandomDataForBench returns size bytes of cryptographically random data
// for benchmark input; it panics on failure because a benchmark cannot
// proceed without its input.
func genRandomDataForBench(size int) []byte {
	buf := make([]byte, size)
	_, err := rand.Read(buf)
	if err != nil {
		panic(err)
	}
	return buf
}
// createBenchTempFile writes data into a fresh temporary file and returns
// its path; the caller is responsible for removing the file.
func createBenchTempFile(b *testing.B, data []byte) string {
	f, err := os.CreateTemp("", "xcipher-bench-*")
	if err != nil {
		b.Fatalf("Failed to create temporary file: %v", err)
	}
	name := f.Name()
	if _, err = f.Write(data); err != nil {
		b.Fatalf("Failed to write to temporary file: %v", err)
	}
	f.Close()
	return name
}
// BenchmarkEncrypt measures one-shot encryption throughput across payload
// sizes from 1KB to 4MB.
func BenchmarkEncrypt(b *testing.B) {
	for _, size := range []int{
		1 * 1024,        // 1KB
		16 * 1024,       // 16KB
		64 * 1024,       // 64KB
		256 * 1024,      // 256KB
		1 * 1024 * 1024, // 1MB
		4 * 1024 * 1024, // 4MB
	} {
		size := size
		b.Run(byteCountToString(int64(size)), func(b *testing.B) {
			plaintext := genRandomDataForBench(size)
			c := NewXCipher(benchTestKey)
			b.SetBytes(int64(size))
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				if _, err := c.Encrypt(plaintext, nil); err != nil {
					b.Fatal(err)
				}
			}
		})
	}
}
// BenchmarkDecrypt measures one-shot decryption throughput across payload
// sizes from 1KB to 4MB; the ciphertext is prepared before the timer starts.
func BenchmarkDecrypt(b *testing.B) {
	for _, size := range []int{
		1 * 1024,        // 1KB
		16 * 1024,       // 16KB
		64 * 1024,       // 64KB
		256 * 1024,      // 256KB
		1 * 1024 * 1024, // 1MB
		4 * 1024 * 1024, // 4MB
	} {
		size := size
		b.Run(byteCountToString(int64(size)), func(b *testing.B) {
			c := NewXCipher(benchTestKey)
			ciphertext, err := c.Encrypt(genRandomDataForBench(size), nil)
			if err != nil {
				b.Fatal(err)
			}
			b.SetBytes(int64(size))
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				if _, err := c.Decrypt(ciphertext, nil); err != nil {
					b.Fatal(err)
				}
			}
		})
	}
}
// BenchmarkEncryptLarge measures encryption of a 1MB payload with additional
// authenticated data.
// Fixes: key-generation and per-iteration encryption errors are no longer
// silently discarded, and b.SetBytes is set so throughput is reported.
func BenchmarkEncryptLarge(b *testing.B) {
	key := make([]byte, chacha20poly1305.KeySize)
	if _, err := rand.Read(key); err != nil {
		b.Fatalf("failed to generate key: %v", err)
	}
	x := NewXCipher(key)
	plaintext := make([]byte, 1<<20) // 1MB data
	additionalData := []byte("test")
	b.SetBytes(1 << 20)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if _, err := x.Encrypt(plaintext, additionalData); err != nil {
			b.Fatal(err)
		}
	}
}
// BenchmarkDecryptLarge measures decryption of a 1MB payload with additional
// authenticated data.
// Fixes: errors from key generation, the setup encryption, and each timed
// decryption are no longer silently discarded, and b.SetBytes is set so
// throughput is reported.
func BenchmarkDecryptLarge(b *testing.B) {
	key := make([]byte, chacha20poly1305.KeySize)
	if _, err := rand.Read(key); err != nil {
		b.Fatalf("failed to generate key: %v", err)
	}
	x := NewXCipher(key)
	plaintext := make([]byte, 1<<20) // 1MB data
	additionalData := []byte("test")
	ciphertext, err := x.Encrypt(plaintext, additionalData)
	if err != nil {
		b.Fatalf("setup encryption failed: %v", err)
	}
	b.SetBytes(1 << 20)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if _, err := x.Decrypt(ciphertext, additionalData); err != nil {
			b.Fatal(err)
		}
	}
}
// Stream encryption/decryption benchmarks

// BenchmarkStreamEncrypt tests stream encryption performance with different
// data sizes (1KB to 4MB).
// Fixes: encryption and key-generation errors are reported instead of
// discarded, and the deprecated ioutil.Discard is replaced by io.Discard.
func BenchmarkStreamEncrypt(b *testing.B) {
	// Test different data sizes
	sizes := []int{
		1 << 10, // 1KB
		1 << 14, // 16KB
		1 << 16, // 64KB
		1 << 18, // 256KB
		1 << 20, // 1MB
		1 << 22, // 4MB
	}
	for _, size := range sizes {
		b.Run(fmt.Sprintf("Size_%dKB", size/1024), func(b *testing.B) {
			key := make([]byte, chacha20poly1305.KeySize)
			if _, err := rand.Read(key); err != nil {
				b.Fatalf("failed to generate key: %v", err)
			}
			x := NewXCipher(key)
			plaintext := genRandomDataForBench(size)
			additionalData := []byte("stream-test")
			b.ResetTimer()
			b.SetBytes(int64(size))
			for i := 0; i < b.N; i++ {
				// Exclude per-iteration reader setup from the timing.
				b.StopTimer()
				reader := bytes.NewReader(plaintext)
				b.StartTimer()
				if err := x.EncryptStream(reader, io.Discard, additionalData); err != nil {
					b.Fatal(err)
				}
			}
		})
	}
}
// BenchmarkStreamDecrypt tests stream decryption performance with different
// data sizes (1KB to 4MB); ciphertext is prepared before the timer starts.
// Fixes: setup and per-iteration errors are reported instead of discarded,
// and the deprecated ioutil.Discard is replaced by io.Discard.
func BenchmarkStreamDecrypt(b *testing.B) {
	// Test different data sizes
	sizes := []int{
		1 << 10, // 1KB
		1 << 14, // 16KB
		1 << 16, // 64KB
		1 << 18, // 256KB
		1 << 20, // 1MB
		1 << 22, // 4MB
	}
	for _, size := range sizes {
		b.Run(fmt.Sprintf("Size_%dKB", size/1024), func(b *testing.B) {
			key := make([]byte, chacha20poly1305.KeySize)
			if _, err := rand.Read(key); err != nil {
				b.Fatalf("failed to generate key: %v", err)
			}
			x := NewXCipher(key)
			plaintext := genRandomDataForBench(size)
			additionalData := []byte("stream-test")
			// Encrypt data first so only decryption is timed.
			var encBuf bytes.Buffer
			if err := x.EncryptStream(bytes.NewReader(plaintext), &encBuf, additionalData); err != nil {
				b.Fatalf("setup encryption failed: %v", err)
			}
			encData := encBuf.Bytes()
			b.ResetTimer()
			b.SetBytes(int64(size))
			for i := 0; i < b.N; i++ {
				b.StopTimer()
				reader := bytes.NewReader(encData)
				b.StartTimer()
				if err := x.DecryptStream(reader, io.Discard, additionalData); err != nil {
					b.Fatal(err)
				}
			}
		})
	}
}
// BenchmarkStreamParallelVsSerial compares parallel and serial stream
// encryption performance over a 10MB file with small, default, and large
// buffer sizes.
// NOTE(review): the rand.Read error below is silently ignored — harmless for
// a benchmark key, but inconsistent with the other cases in this file.
func BenchmarkStreamParallelVsSerial(b *testing.B) {
	// Use larger data to test parallel advantage (10MB)
	dataSize := 10 * 1024 * 1024
	benchCases := []struct {
		name        string
		useParallel bool
		bufferSize  int
	}{
		{"Serial_Default", false, streamBufferSize},
		{"Serial_SmallBuffer", false, 16 * 1024},
		{"Serial_LargeBuffer", false, 256 * 1024},
		{"Parallel_Default", true, streamBufferSize},
		{"Parallel_SmallBuffer", true, 16 * 1024},
		{"Parallel_LargeBuffer", true, 256 * 1024},
	}
	// Prepare benchmark data once, on disk, shared by every case.
	data := genRandomDataForBench(dataSize)
	tempFile := createBenchTempFile(b, data)
	defer os.Remove(tempFile)
	for _, bc := range benchCases {
		b.Run(bc.name, func(b *testing.B) {
			key := make([]byte, chacha20poly1305.KeySize)
			rand.Read(key)
			x := NewXCipher(key)
			additionalData := []byte("parallel-test")
			b.ResetTimer()
			b.SetBytes(int64(dataSize))
			for i := 0; i < b.N; i++ {
				// Exclude file/option setup from the timed region.
				b.StopTimer()
				// Open input file
				inFile, err := os.Open(tempFile)
				if err != nil {
					b.Fatalf("Failed to open test file: %v", err)
				}
				// Create a temporary discard writer, but use buffer to avoid frequent GC
				discardBuf := bytes.NewBuffer(make([]byte, 0, 64*1024))
				discardWriter := &writerDiscardButBuffer{buf: discardBuf}
				// Set options
				options := DefaultStreamOptions()
				options.UseParallel = bc.useParallel
				options.BufferSize = bc.bufferSize
				options.AdditionalData = additionalData
				b.StartTimer()
				// Perform encryption
				if _, err := x.EncryptStreamWithOptions(inFile, discardWriter, options); err != nil {
					b.Fatalf("Encryption failed: %v", err)
				}
				b.StopTimer()
				inFile.Close()
			}
		})
	}
}
// writerDiscardButBuffer is a benchmark writer that exercises real buffer
// writes while keeping memory bounded, avoiding the GC churn a plain
// discard-and-reallocate approach would cause.
type writerDiscardButBuffer struct {
	buf *bytes.Buffer
}

// Write appends p to the internal buffer, resetting it whenever it grows
// past 1MB so the buffer never grows without bound.
func (w *writerDiscardButBuffer) Write(p []byte) (n int, err error) {
	const resetThreshold = 1024 * 1024
	if w.buf.Len() > resetThreshold {
		w.buf.Reset()
	}
	return w.buf.Write(p)
}
// BenchmarkStreamDifferentBufferSizes tests the impact of different buffer
// sizes (8KB to 512KB) on stream-encryption performance.
// Fixes: key-generation and encryption errors are reported instead of being
// silently discarded.
func BenchmarkStreamDifferentBufferSizes(b *testing.B) {
	dataSize := 5 * 1024 * 1024 // 5MB
	bufferSizes := []int{
		8 * 1024,   // 8KB
		16 * 1024,  // 16KB
		32 * 1024,  // 32KB
		64 * 1024,  // 64KB (default)
		128 * 1024, // 128KB
		256 * 1024, // 256KB
		512 * 1024, // 512KB
	}
	// Prepare test data
	data := genRandomDataForBench(dataSize)
	for _, bufSize := range bufferSizes {
		b.Run(fmt.Sprintf("BufferSize_%dKB", bufSize/1024), func(b *testing.B) {
			key := make([]byte, chacha20poly1305.KeySize)
			if _, err := rand.Read(key); err != nil {
				b.Fatalf("failed to generate key: %v", err)
			}
			x := NewXCipher(key)
			additionalData := []byte("buffer-test")
			b.ResetTimer()
			b.SetBytes(int64(dataSize))
			for i := 0; i < b.N; i++ {
				// Exclude reader/option setup from the timed region.
				b.StopTimer()
				reader := bytes.NewReader(data)
				options := DefaultStreamOptions()
				options.BufferSize = bufSize
				options.AdditionalData = additionalData
				b.StartTimer()
				if _, err := x.EncryptStreamWithOptions(reader, io.Discard, options); err != nil {
					b.Fatal(err)
				}
			}
		})
	}
}
// BenchmarkStreamWorkerCount tests the impact of different worker thread
// counts (1 to 16) on parallel stream-encryption performance over a 20MB file.
// Fixes: key-generation and encryption errors are reported instead of being
// silently discarded, and the deprecated ioutil.Discard is replaced by
// io.Discard.
func BenchmarkStreamWorkerCount(b *testing.B) {
	dataSize := 20 * 1024 * 1024 // 20MB
	// Test different worker thread counts
	workerCounts := []int{1, 2, 4, 8, 16}
	// Prepare test data once, on disk, shared by every case.
	data := genRandomDataForBench(dataSize)
	tempFile := createBenchTempFile(b, data)
	defer os.Remove(tempFile)
	for _, workerCount := range workerCounts {
		b.Run(fmt.Sprintf("Workers_%d", workerCount), func(b *testing.B) {
			key := make([]byte, chacha20poly1305.KeySize)
			if _, err := rand.Read(key); err != nil {
				b.Fatalf("failed to generate key: %v", err)
			}
			x := NewXCipher(key)
			additionalData := []byte("worker-test")
			b.ResetTimer()
			b.SetBytes(int64(dataSize))
			for i := 0; i < b.N; i++ {
				// Exclude file/option setup from the timed region.
				b.StopTimer()
				inFile, err := os.Open(tempFile)
				if err != nil {
					b.Fatalf("Failed to open test file: %v", err)
				}
				options := DefaultStreamOptions()
				options.UseParallel = true
				options.MaxWorkers = workerCount
				options.AdditionalData = additionalData
				b.StartTimer()
				_, err = x.EncryptStreamWithOptions(inFile, io.Discard, options)
				b.StopTimer()
				inFile.Close()
				if err != nil {
					b.Fatalf("Encryption failed: %v", err)
				}
			}
		})
	}
}
// BenchmarkStreamFileVsMemory compares file-backed and in-memory stream
// encryption performance over a 5MB payload.
// Fixes: the original deferred inFile.Close/os.Remove inside the b.N loop,
// accumulating open file handles and temp files until the function returned;
// cleanup now happens explicitly at the end of each iteration. The
// EncryptStream error is also reported instead of discarded, and
// ioutil.Discard is replaced by io.Discard.
func BenchmarkStreamFileVsMemory(b *testing.B) {
	dataSize := 5 * 1024 * 1024 // 5MB
	// Prepare test data once, on disk, shared by both cases.
	data := genRandomDataForBench(dataSize)
	tempFile := createBenchTempFile(b, data)
	defer os.Remove(tempFile)
	benchCases := []struct {
		name    string
		useFile bool
	}{
		{"Memory", false},
		{"File", true},
	}
	for _, bc := range benchCases {
		b.Run(bc.name, func(b *testing.B) {
			key := make([]byte, chacha20poly1305.KeySize)
			if _, err := rand.Read(key); err != nil {
				b.Fatalf("failed to generate key: %v", err)
			}
			x := NewXCipher(key)
			additionalData := []byte("io-test")
			b.ResetTimer()
			b.SetBytes(int64(dataSize))
			for i := 0; i < b.N; i++ {
				b.StopTimer()
				var reader io.Reader
				var writer io.Writer
				var inFile, tempOutFile *os.File
				if bc.useFile {
					// Use file IO
					var err error
					inFile, err = os.Open(tempFile)
					if err != nil {
						b.Fatalf("Failed to open test file: %v", err)
					}
					tempOutFile, err = os.CreateTemp("", "xcipher-bench-out-*")
					if err != nil {
						inFile.Close()
						b.Fatalf("Failed to create output file: %v", err)
					}
					reader = inFile
					writer = tempOutFile
				} else {
					// Use memory IO
					reader = bytes.NewReader(data)
					writer = io.Discard
				}
				b.StartTimer()
				err := x.EncryptStream(reader, writer, additionalData)
				b.StopTimer()
				// Clean up per iteration (fix: was deferred, leaking fds
				// and temp files for the whole benchmark run).
				if inFile != nil {
					inFile.Close()
				}
				if tempOutFile != nil {
					tempOutFile.Close()
					os.Remove(tempOutFile.Name())
				}
				if err != nil {
					b.Fatalf("Encryption failed: %v", err)
				}
			}
		})
	}
}
// generateBenchTestKey creates one random ChaCha20-Poly1305 key shared by
// all benchmarks; it panics on failure since no benchmark can run without it.
func generateBenchTestKey() []byte {
	k := make([]byte, chacha20poly1305.KeySize)
	if _, err := rand.Read(k); err != nil {
		panic(err)
	}
	return k
}

// benchTestKey is fixed for the process lifetime to reduce benchmark variance.
var benchTestKey = generateBenchTestKey()
// BenchmarkEncryptStream measures serial stream-encryption throughput on
// 1MB, 16MB and 64MB payloads.
func BenchmarkEncryptStream(b *testing.B) {
	for _, size := range []int{
		1 * 1024 * 1024,  // 1MB
		16 * 1024 * 1024, // 16MB
		64 * 1024 * 1024, // 64MB — large-file behavior
	} {
		size := size
		b.Run(byteCountToString(int64(size)), func(b *testing.B) {
			plaintext := genRandomDataForBench(size)
			c := NewXCipher(benchTestKey)
			b.SetBytes(int64(size))
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				// Exclude reader/buffer setup from the timed region.
				b.StopTimer()
				src := bytes.NewReader(plaintext)
				dst := &bytes.Buffer{}
				b.StartTimer()
				if err := c.EncryptStream(src, dst, nil); err != nil {
					b.Fatal(err)
				}
			}
		})
	}
}
// BenchmarkEncryptStreamParallel measures parallel stream-encryption
// throughput on 1MB, 16MB and 64MB payloads.
func BenchmarkEncryptStreamParallel(b *testing.B) {
	for _, size := range []int{
		1 * 1024 * 1024,  // 1MB
		16 * 1024 * 1024, // 16MB
		64 * 1024 * 1024, // 64MB — large-file behavior
	} {
		size := size
		b.Run(byteCountToString(int64(size)), func(b *testing.B) {
			plaintext := genRandomDataForBench(size)
			c := NewXCipher(benchTestKey)
			opts := DefaultStreamOptions()
			opts.UseParallel = true
			opts.BufferSize = calculateOptimalBufferSize(opts)
			b.SetBytes(int64(size))
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				// Exclude reader/buffer setup from the timed region.
				b.StopTimer()
				src := bytes.NewReader(plaintext)
				dst := &bytes.Buffer{}
				b.StartTimer()
				if _, err := c.EncryptStreamWithOptions(src, dst, opts); err != nil {
					b.Fatal(err)
				}
			}
		})
	}
}
// BenchmarkDecryptStream measures serial stream-decryption throughput on
// 1MB and 16MB payloads; ciphertext is produced before the timer starts.
func BenchmarkDecryptStream(b *testing.B) {
	for _, size := range []int{
		1 * 1024 * 1024,  // 1MB
		16 * 1024 * 1024, // 16MB
	} {
		size := size
		b.Run(byteCountToString(int64(size)), func(b *testing.B) {
			// Produce ciphertext up front so only decryption is timed.
			plaintext := genRandomDataForBench(size)
			c := NewXCipher(benchTestKey)
			encBuf := &bytes.Buffer{}
			if err := c.EncryptStream(bytes.NewReader(plaintext), encBuf, nil); err != nil {
				b.Fatal(err)
			}
			ciphertext := encBuf.Bytes()
			b.SetBytes(int64(size))
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				b.StopTimer()
				src := bytes.NewReader(ciphertext)
				b.StartTimer()
				// io.Discard avoids output-buffer allocation and write overhead.
				if err := c.DecryptStream(src, io.Discard, nil); err != nil {
					b.Fatal(err)
				}
			}
		})
	}
}
// BenchmarkDecryptStreamParallel measures parallel stream-decryption
// throughput on 1MB and 16MB payloads; the ciphertext is produced with the
// parallel encryptor so the formats match.
func BenchmarkDecryptStreamParallel(b *testing.B) {
	for _, size := range []int{
		1 * 1024 * 1024,  // 1MB
		16 * 1024 * 1024, // 16MB
	} {
		size := size
		b.Run(byteCountToString(int64(size)), func(b *testing.B) {
			// Encrypt in parallel mode up front so only decryption is timed.
			plaintext := genRandomDataForBench(size)
			c := NewXCipher(benchTestKey)
			encBuf := &bytes.Buffer{}
			encOpts := DefaultStreamOptions()
			encOpts.UseParallel = true
			if _, err := c.EncryptStreamWithOptions(bytes.NewReader(plaintext), encBuf, encOpts); err != nil {
				b.Fatal(err)
			}
			ciphertext := encBuf.Bytes()
			// Decryption options mirror the encryption mode.
			decOpts := DefaultStreamOptions()
			decOpts.UseParallel = true
			b.SetBytes(int64(size))
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				b.StopTimer()
				src := bytes.NewReader(ciphertext)
				b.StartTimer()
				// io.Discard avoids output-buffer allocation and write overhead.
				if _, err := c.DecryptStreamWithOptions(src, io.Discard, decOpts); err != nil {
					b.Fatal(err)
				}
			}
		})
	}
}
// byteCountToString renders a byte count as a human-readable string using
// 1024-based units, e.g. 512 -> "512 B", 1536 -> "1.5 KB".
func byteCountToString(b int64) string {
	const unit = 1024
	if b < unit {
		return fmt.Sprintf("%d B", b)
	}
	div := int64(unit)
	exp := 0
	// Find the largest unit smaller than b; exp indexes into "KMGTPE".
	for rest := b / unit; rest >= unit; rest /= unit {
		div *= unit
		exp++
	}
	return fmt.Sprintf("%.1f %cB", float64(b)/float64(div), "KMGTPE"[exp])
}

528
xcipher_test.go Normal file
View File

@@ -0,0 +1,528 @@
package xcipher
import (
"bytes"
"context"
"crypto/rand"
"errors"
"fmt"
"golang.org/x/crypto/chacha20poly1305"
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
)
// generateRandomKey returns a fresh random ChaCha20-Poly1305 key, along with
// any error from the system's random source.
func generateRandomKey() ([]byte, error) {
	k := make([]byte, chacha20poly1305.KeySize)
	_, readErr := rand.Read(k)
	return k, readErr
}
// generateRandomData returns size bytes drawn from crypto/rand, along with
// any error from the system's random source.
func generateRandomData(size int) ([]byte, error) {
	buf := make([]byte, size)
	_, readErr := rand.Read(buf)
	return buf, readErr
}
// createTempFile writes data into a file under t.TempDir (removed
// automatically when the test finishes) and returns its path.
// Fix: the deprecated ioutil.WriteFile is replaced by os.WriteFile
// (available since Go 1.16; "os" is already imported by this file).
func createTempFile(t *testing.T, data []byte) string {
	tempDir := t.TempDir()
	tempFile := filepath.Join(tempDir, "test_data")
	if err := os.WriteFile(tempFile, data, 0644); err != nil {
		t.Fatalf("Failed to create temporary file: %v", err)
	}
	return tempFile
}
// TestEncryptDecryptImageWithLog round-trips a real image file through
// Encrypt/Decrypt, logging the time spent in each of the seven phases.
// Fix: when the test.jpg fixture is absent the test is now skipped instead of
// failed, so the suite can run in checkouts without binary fixtures.
func TestEncryptDecryptImageWithLog(t *testing.T) {
	startTotal := time.Now()
	defer func() {
		t.Logf("Total time: %v", time.Since(startTotal))
	}()
	// Read original image
	imagePath := "test.jpg"
	start := time.Now()
	imageData, err := ioutil.ReadFile(imagePath)
	if err != nil {
		if os.IsNotExist(err) {
			t.Skipf("fixture %s not present, skipping", imagePath)
		}
		t.Fatalf("Failed to read image: %v", err)
	}
	t.Logf("[1/7] Read image %s (%.2fKB) time: %v",
		imagePath, float64(len(imageData))/1024, time.Since(start))
	// Generate encryption key
	start = time.Now()
	key, err := generateRandomKey()
	if err != nil {
		t.Fatalf("Failed to generate key: %v", err)
	}
	t.Logf("[2/7] Generated %d bytes key time: %v", len(key), time.Since(start))
	// Initialize cipher
	start = time.Now()
	xcipher := NewXCipher(key)
	t.Logf("[3/7] Initialized cipher time: %v", time.Since(start))
	// Perform encryption
	additionalData := []byte("Image metadata")
	start = time.Now()
	ciphertext, err := xcipher.Encrypt(imageData, additionalData)
	if err != nil {
		t.Fatalf("Encryption failed: %v", err)
	}
	t.Logf("[4/7] Encrypted data (input: %d bytes, output: %d bytes) time: %v",
		len(imageData), len(ciphertext), time.Since(start))
	// Save encrypted file
	cipherPath := "encrypted.jpg"
	start = time.Now()
	if err := ioutil.WriteFile(cipherPath, ciphertext, 0644); err != nil {
		t.Fatalf("Failed to save encrypted file: %v", err)
	}
	t.Logf("[5/7] Wrote encrypted file %s time: %v", cipherPath, time.Since(start))
	// Perform decryption
	start = time.Now()
	decryptedData, err := xcipher.Decrypt(ciphertext, additionalData)
	if err != nil {
		t.Fatalf("Decryption failed: %v", err)
	}
	decryptDuration := time.Since(start)
	t.Logf("[6/7] Decrypted data (input: %d bytes, output: %d bytes) time: %v (%.2f MB/s)",
		len(ciphertext), len(decryptedData), decryptDuration,
		float64(len(ciphertext))/1e6/decryptDuration.Seconds())
	// Verify data integrity
	start = time.Now()
	if !bytes.Equal(imageData, decryptedData) {
		t.Fatal("Decrypted data verification failed")
	}
	t.Logf("[7/7] Data verification time: %v", time.Since(start))
	// Save decrypted image
	decryptedPath := "decrypted.jpg"
	start = time.Now()
	if err := ioutil.WriteFile(decryptedPath, decryptedData, 0644); err != nil {
		t.Fatalf("Failed to save decrypted image: %v", err)
	}
	t.Logf("Saved decrypted image %s (%.2fKB) time: %v",
		decryptedPath, float64(len(decryptedData))/1024, time.Since(start))
}
// TestStreamEncryptDecrypt round-trips 1MB of random data through the
// default (serial) stream encrypt/decrypt path and verifies the plaintext
// is recovered exactly.
func TestStreamEncryptDecrypt(t *testing.T) {
	key, err := generateRandomKey()
	if err != nil {
		t.Fatalf("Failed to generate key: %v", err)
	}
	xcipher := NewXCipher(key)
	// 1MB of random plaintext.
	testSize := 1 * 1024 * 1024
	testData, err := generateRandomData(testSize)
	if err != nil {
		t.Fatalf("Failed to generate test data: %v", err)
	}
	additionalData := []byte("Test additional data")
	// Encrypt from an in-memory reader into a buffer.
	var encryptedBuf bytes.Buffer
	if err := xcipher.EncryptStream(bytes.NewReader(testData), &encryptedBuf, additionalData); err != nil {
		t.Fatalf("Stream encryption failed: %v", err)
	}
	// Decrypt the ciphertext back into a second buffer.
	var decryptedBuf bytes.Buffer
	if err := xcipher.DecryptStream(bytes.NewReader(encryptedBuf.Bytes()), &decryptedBuf, additionalData); err != nil {
		t.Fatalf("Stream decryption failed: %v", err)
	}
	// The round trip must reproduce the original bytes.
	if !bytes.Equal(testData, decryptedBuf.Bytes()) {
		t.Fatal("Stream encrypted/decrypted data does not match")
	}
	t.Logf("Successfully stream processed %d bytes of data", testSize)
}
// TestStreamEncryptDecryptWithOptions tests stream encryption/decryption with
// options across several buffer sizes. For each size it round-trips 2MB of
// random data through temporary files, verifies the decrypted output matches
// the original, and logs the collected performance statistics.
func TestStreamEncryptDecryptWithOptions(t *testing.T) {
	// Generate random key
	key, err := generateRandomKey()
	if err != nil {
		t.Fatalf("Failed to generate key: %v", err)
	}
	// Initialize cipher
	xcipher := NewXCipher(key)
	// Generate random test data (2MB)
	testSize := 2 * 1024 * 1024
	testData, err := generateRandomData(testSize)
	if err != nil {
		t.Fatalf("Failed to generate test data: %v", err)
	}
	// Create temporary file for testing large data
	inputFile := createTempFile(t, testData)
	defer os.Remove(inputFile)
	// Additional data
	additionalData := []byte("Test additional data")
	// Test different buffer size options
	bufferSizes := []int{8 * 1024, 32 * 1024, 128 * 1024}
	for _, bufSize := range bufferSizes {
		t.Run(fmt.Sprintf("BufferSize=%dKB", bufSize/1024), func(t *testing.T) {
			// Per-subtest output files, removed when the subtest finishes.
			encryptedFile := inputFile + ".enc"
			decryptedFile := inputFile + ".dec"
			defer os.Remove(encryptedFile)
			defer os.Remove(decryptedFile)
			// Open input file
			inFile, err := os.Open(inputFile)
			if err != nil {
				t.Fatalf("Failed to open input file: %v", err)
			}
			defer inFile.Close()
			// Create encrypted output file
			outFile, err := os.Create(encryptedFile)
			if err != nil {
				t.Fatalf("Failed to create encrypted output file: %v", err)
			}
			defer outFile.Close()
			// Configure the stream with the buffer size under test and
			// statistics collection enabled.
			options := DefaultStreamOptions()
			options.BufferSize = bufSize
			options.AdditionalData = additionalData
			options.CollectStats = true
			// Perform stream encryption
			stats, err := xcipher.EncryptStreamWithOptions(inFile, outFile, options)
			if err != nil {
				t.Fatalf("Stream encryption failed: %v", err)
			}
			// Close the encrypted output before reopening it for decryption so
			// a write error surfaced at close time fails the test instead of
			// being silently dropped by the deferred Close.
			if err := outFile.Close(); err != nil {
				t.Fatalf("Failed to close encrypted output file: %v", err)
			}
			// Output encryption performance statistics
			t.Logf("Encryption performance statistics (buffer size=%dKB):", bufSize/1024)
			t.Logf("- Bytes processed: %d", stats.BytesProcessed)
			t.Logf("- Blocks processed: %d", stats.BlocksProcessed)
			t.Logf("- Average block size: %.2f bytes", stats.AvgBlockSize)
			t.Logf("- Processing time: %v", stats.Duration())
			t.Logf("- Throughput: %.2f MB/s", stats.Throughput)
			// Prepare for decryption
			encFile, err := os.Open(encryptedFile)
			if err != nil {
				t.Fatalf("Failed to open encrypted file: %v", err)
			}
			defer encFile.Close()
			decFile, err := os.Create(decryptedFile)
			if err != nil {
				t.Fatalf("Failed to create decrypted output file: %v", err)
			}
			defer decFile.Close()
			// Perform stream decryption
			_, err = xcipher.DecryptStreamWithOptions(encFile, decFile, options)
			if err != nil {
				t.Fatalf("Stream decryption failed: %v", err)
			}
			// Close file to ensure data is written, checking for flush errors
			// that the deferred Close would otherwise discard.
			if err := decFile.Close(); err != nil {
				t.Fatalf("Failed to close decrypted output file: %v", err)
			}
			// Read decrypted data for verification
			decryptedData, err := ioutil.ReadFile(decryptedFile)
			if err != nil {
				t.Fatalf("Failed to read decrypted file: %v", err)
			}
			// Verify data
			if !bytes.Equal(testData, decryptedData) {
				t.Fatal("Stream encrypted/decrypted data does not match")
			}
			t.Logf("Successfully stream processed %d bytes of data (buffer=%dKB)", testSize, bufSize/1024)
		})
	}
}
// TestStreamParallelProcessing tests parallel stream encryption/decryption on
// 10MB of random data (large enough to trigger the parallel path) using four
// worker threads, verifying the round trip preserves the data and logging the
// collected performance statistics.
func TestStreamParallelProcessing(t *testing.T) {
	// Generate random key
	key, err := generateRandomKey()
	if err != nil {
		t.Fatalf("Failed to generate key: %v", err)
	}
	// Initialize cipher
	xcipher := NewXCipher(key)
	// Generate large random test data (10MB, enough to trigger parallel processing)
	testSize := 10 * 1024 * 1024
	testData, err := generateRandomData(testSize)
	if err != nil {
		t.Fatalf("Failed to generate test data: %v", err)
	}
	// Create temporary file
	inputFile := createTempFile(t, testData)
	defer os.Remove(inputFile)
	encryptedFile := inputFile + ".parallel.enc"
	decryptedFile := inputFile + ".parallel.dec"
	defer os.Remove(encryptedFile)
	defer os.Remove(decryptedFile)
	// Open input file
	inFile, err := os.Open(inputFile)
	if err != nil {
		t.Fatalf("Failed to open input file: %v", err)
	}
	defer inFile.Close()
	// Create encrypted output file
	outFile, err := os.Create(encryptedFile)
	if err != nil {
		t.Fatalf("Failed to create encrypted output file: %v", err)
	}
	defer outFile.Close()
	// Create parallel processing options
	options := DefaultStreamOptions()
	options.UseParallel = true
	options.MaxWorkers = 4 // Use 4 worker threads
	options.CollectStats = true
	// Perform parallel stream encryption
	stats, err := xcipher.EncryptStreamWithOptions(inFile, outFile, options)
	if err != nil {
		t.Fatalf("Parallel stream encryption failed: %v", err)
	}
	// Ensure file is written completely; a close error here was previously
	// ignored, which could hide a flush failure before the read-back below.
	if err := outFile.Close(); err != nil {
		t.Fatalf("Failed to close encrypted output file: %v", err)
	}
	// Output encryption performance statistics
	t.Logf("Parallel encryption performance statistics:")
	t.Logf("- Bytes processed: %d", stats.BytesProcessed)
	t.Logf("- Blocks processed: %d", stats.BlocksProcessed)
	t.Logf("- Average block size: %.2f bytes", stats.AvgBlockSize)
	t.Logf("- Processing time: %v", stats.Duration())
	t.Logf("- Throughput: %.2f MB/s", stats.Throughput)
	t.Logf("- Worker threads: %d", stats.WorkerCount)
	// Prepare for decryption
	encFile, err := os.Open(encryptedFile)
	if err != nil {
		t.Fatalf("Failed to open encrypted file: %v", err)
	}
	defer encFile.Close()
	decFile, err := os.Create(decryptedFile)
	if err != nil {
		t.Fatalf("Failed to create decrypted output file: %v", err)
	}
	defer decFile.Close()
	// Perform parallel stream decryption
	_, err = xcipher.DecryptStreamWithOptions(encFile, decFile, options)
	if err != nil {
		t.Fatalf("Parallel stream decryption failed: %v", err)
	}
	// Close file to ensure data is written, checking for flush errors that the
	// deferred Close would otherwise discard.
	if err := decFile.Close(); err != nil {
		t.Fatalf("Failed to close decrypted output file: %v", err)
	}
	// Read decrypted data for verification
	decryptedData, err := ioutil.ReadFile(decryptedFile)
	if err != nil {
		t.Fatalf("Failed to read decrypted file: %v", err)
	}
	// Verify data
	if !bytes.Equal(testData, decryptedData) {
		t.Fatal("Parallel stream encrypted/decrypted data does not match")
	}
	t.Logf("Successfully parallel stream processed %d bytes of data", testSize)
}
// TestStreamCancellation tests cancellation of stream encryption/decryption
// operations: an encryption over an endless data source is cancelled shortly
// after it starts, and the operation must fail with ErrOperationCancelled.
func TestStreamCancellation(t *testing.T) {
	// Generate random key
	key, err := generateRandomKey()
	if err != nil {
		t.Fatalf("Failed to generate key: %v", err)
	}
	// Initialize cipher
	xcipher := NewXCipher(key)
	// Generate large test data (50MB, enough time to cancel)
	testSize := 50 * 1024 * 1024
	testData, err := generateRandomData(testSize)
	if err != nil {
		t.Fatalf("Failed to generate test data: %v", err)
	}
	// Create an unlimited data source to simulate large file
	infiniteReader := &infiniteDataReader{data: testData}
	// Create output buffer
	var outputBuf bytes.Buffer
	// Create context with cancellation. The deferred cancel releases the
	// context on every exit path, as the context package requires, even if
	// the timed goroutine below has not fired yet.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// Create options with cancel channel
	options := DefaultStreamOptions()
	options.CancelChan = ctx.Done()
	// Cancel operation after a short time
	go func() {
		time.Sleep(100 * time.Millisecond) // Let encryption run for a short time
		cancel()
	}()
	// Perform stream encryption, should be cancelled
	_, err = xcipher.EncryptStreamWithOptions(infiniteReader, &outputBuf, options)
	// Verify error is cancellation error
	if !errors.Is(err, ErrOperationCancelled) {
		t.Fatalf("Expected cancellation error, but got: %v", err)
	}
	t.Log("Successfully tested stream encryption cancellation")
}
// TestStreamErrors tests error handling in stream encryption/decryption:
// out-of-range buffer sizes, tampered ciphertext (authentication failure),
// and injected read/write failures from stub reader/writer types.
func TestStreamErrors(t *testing.T) {
	// Generate random key
	key, err := generateRandomKey()
	if err != nil {
		t.Fatalf("Failed to generate key: %v", err)
	}
	// Initialize cipher
	xcipher := NewXCipher(key)
	// Test invalid buffer size
	t.Run("InvalidBufferSize", func(t *testing.T) {
		var buf bytes.Buffer
		options := DefaultStreamOptions()
		options.BufferSize = 1 // Too small buffer
		_, err := xcipher.EncryptStreamWithOptions(bytes.NewReader([]byte("test")), &buf, options)
		if err == nil || !errors.Is(err, ErrBufferSizeTooSmall) {
			t.Fatalf("Expected buffer too small error, but got: %v", err)
		}
		options.BufferSize = 10 * 1024 * 1024 // Too large buffer
		_, err = xcipher.EncryptStreamWithOptions(bytes.NewReader([]byte("test")), &buf, options)
		if err == nil || !errors.Is(err, ErrBufferSizeTooLarge) {
			t.Fatalf("Expected buffer too large error, but got: %v", err)
		}
	})
	// Test authentication failure
	t.Run("AuthenticationFailure", func(t *testing.T) {
		// First encrypt some data
		plaintext := []byte("Test authentication failure")
		var encBuf bytes.Buffer
		err := xcipher.EncryptStream(bytes.NewReader(plaintext), &encBuf, nil)
		if err != nil {
			t.Fatalf("Encryption failed: %v", err)
		}
		// Tamper with encrypted data. Fail loudly if the ciphertext is ever too
		// short to tamper with: previously the corruption step was silently
		// skipped in that case, letting the subtest pass without actually
		// exercising authentication.
		encryptedData := encBuf.Bytes()
		if len(encryptedData) <= nonceSize+10 {
			t.Fatalf("Ciphertext too short to tamper with: %d bytes", len(encryptedData))
		}
		// Modify one byte in ciphertext part
		encryptedData[nonceSize+10]++
		// Try to decrypt tampered data
		var decBuf bytes.Buffer
		err = xcipher.DecryptStream(bytes.NewReader(encryptedData), &decBuf, nil)
		if err == nil || !errors.Is(err, ErrAuthenticationFailed) {
			t.Fatalf("Expected authentication failure error, but got: %v", err)
		}
	})
	// Test read error
	t.Run("ReadError", func(t *testing.T) {
		reader := &errorReader{err: fmt.Errorf("simulated read error")}
		var buf bytes.Buffer
		err := xcipher.EncryptStream(reader, &buf, nil)
		if err == nil || !errors.Is(err, ErrReadFailed) {
			t.Fatalf("Expected read failure error, but got: %v", err)
		}
	})
	// Test write error
	t.Run("WriteError", func(t *testing.T) {
		writer := &errorWriter{err: fmt.Errorf("simulated write error")}
		err := xcipher.EncryptStream(bytes.NewReader([]byte("test")), writer, nil)
		if err == nil || !errors.Is(err, ErrWriteFailed) {
			t.Fatalf("Expected write failure error, but got: %v", err)
		}
	})
}
// infiniteDataReader cycles endlessly over a fixed byte slice, providing an
// unbounded data source for cancellation tests; Read never returns io.EOF.
type infiniteDataReader struct {
	data []byte // payload replayed in a loop
	pos  int    // current offset into data
}

// Read copies bytes starting at the current position and wraps back to the
// beginning of the slice once it has been exhausted.
func (r *infiniteDataReader) Read(p []byte) (int, error) {
	if r.pos >= len(r.data) {
		// Wrap around and replay the data from the start.
		r.pos = 0
	}
	copied := copy(p, r.data[r.pos:])
	r.pos += copied
	return copied, nil
}
// errorReader is an io.Reader stub whose Read always fails with the injected
// error, used to exercise read-failure handling in the stream tests.
type errorReader struct {
	err error // error returned by every Read call
}

// Read consumes nothing and immediately reports the configured error.
func (r *errorReader) Read(p []byte) (int, error) {
	return 0, r.err
}
// errorWriter is an io.Writer stub whose Write always fails with the injected
// error, used to exercise write-failure handling in the stream tests.
type errorWriter struct {
	err error // error returned by every Write call
}

// Write accepts nothing and immediately reports the configured error.
func (w *errorWriter) Write(p []byte) (int, error) {
	return 0, w.err
}