2025-07-07 20:11:59 +08:00
parent ab0fdbc447
commit 06e3aa2eb3
2009 changed files with 193082 additions and 0 deletions

@@ -0,0 +1,278 @@
package fileUpload
import (
"bytes"
"fmt"
"github.com/gogf/gf/v2/frame/g"
"github.com/gogf/gf/v2/net/ghttp"
"github.com/tiger1103/gfast/v3/api/v1/common/globe"
"io"
"log"
"os"
"path"
"strconv"
"strings"
)
func InitUploadApi(group *ghttp.RouterGroup) {
group.POST("/source/upload", SourceUploadFunc)
//group.Bind(new(SourceUpload))
}
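// Illustrative wiring (not part of this commit): InitUploadApi is expected to
// be registered on a router group of a gf v2 server, roughly like:
//
//	s := g.Server()
//	s.Group("/api/v1", func(group *ghttp.RouterGroup) {
//		InitUploadApi(group)
//	})
//	s.Run()
//
// The "/api/v1" prefix is hypothetical; the real prefix depends on how the
// gfast router assembles its groups.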
type SourceUpload struct {
}
type SourceUploadReq struct {
	g.Meta `path:"source/upload" dc:"upload resource" method:"post" tags:"resource"`
}
type SourceUploadRes struct {
}
/*func (SourceUpload) UploadFile(ctx context.Context, req *SourceUploadReq) (res *SourceUploadRes, err error) {
err = startSaveFile(g.RequestFromCtx(ctx))
return
}*/
func SourceUploadFunc(request *ghttp.Request) {
	projectId := request.Get("projectId")
	g.Log().Debugf(request.Context(), "source upload, projectId=%v", projectId)
	if err := startSaveFile(request); err != nil {
		// The response shape here is illustrative; adjust it to the project's
		// actual convention.
		request.Response.WriteJson(g.Map{"code": -1, "message": err.Error()})
		return
	}
	request.Response.WriteJson(g.Map{"code": 0, "message": "ok"})
}
func startSaveFile(request *ghttp.Request) error {
	filename, err := Upload(request, globe.SOURCE)
	if err != nil {
		return err
	}
	g.Log().Infof(request.Context(), "upload finished: %s", filename)
	/* arr := strings.Split(filename, ".")
	arr = arr[:len(arr)-1]
	suffix := path.Ext(filename)
	var SourceType = ""
	switch suffix {
	case globe.CLT:
		SourceType = globe.TILESET
	case globe.JCT:
		SourceType = globe.TILESET
	case globe.MBTILES:
		SourceType = globe.LAYER
	case globe.PAK:
		// at this point we need to determine whether it is terrain or orthoimagery
		SourceType = globe.LAYER
	}*/
	//source := database.SOURCE{
	//	SourceID:   tool.GetUuid(),
	//	SourceName: strings.Join(arr, "."),
	//	SourceType: SourceType,
	//	SourcePath: filename,
	//}
	//database.GetORMDBInstance().Model(&database.SOURCE{}).Create(&source)
	return nil
}
// Upload parses the multipart request body by hand and writes each file part
// into dir. It returns the name of the last file written.
func Upload(r *ghttp.Request, dir string) (string, error) {
	contentLength := r.Request.ContentLength
	if contentLength <= 0 {
		return "", globe.GetErrors("content_length error")
	}
	contentTypes, ok := r.Request.Header["Content-Type"]
	if !ok {
		return "", globe.GetErrors("Content-Type error")
	}
	if len(contentTypes) != 1 {
		return "", globe.GetErrors("Content-Type count error")
	}
	contentType := contentTypes[0]
	const boundaryPrefix = "; boundary="
	loc := strings.Index(contentType, boundaryPrefix)
	if loc == -1 {
		return "", globe.GetErrors("Content-Type error, no boundary")
	}
	boundary := []byte(contentType[loc+len(boundaryPrefix):])
	readData := make([]byte, 1024*12)
	readTotal := 0
	filename := ""
	for {
		fileHeader, fileData, err := ParseFromHead(readData, readTotal, append(boundary, []byte("\r\n")...), r.Request.Body)
		if err != nil {
			return "", err
		}
		filename = fileHeader.FileName
		dst := path.Join(dir, filename)
		f, err := os.Create(dst)
		if err != nil {
			return "", err
		}
		if _, err = f.Write(fileData); err != nil {
			f.Close()
			return "", err
		}
		// Keep scanning the stream for the boundary that ends this part.
		tempData, reachEnd, err := ReadToBoundary(boundary, r.Request.Body, f)
		f.Close()
		if err != nil {
			return "", err
		}
		if reachEnd {
			break
		}
		// Carry the leftover bytes (the start of the next part) into the
		// next iteration.
		copy(readData, tempData)
		readTotal = len(tempData)
	}
	return filename, nil
}
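// For reference, the multipart/form-data wire format this parser walks
// through looks roughly like the following (boundary value hypothetical):
//
//	POST /source/upload HTTP/1.1
//	Content-Type: multipart/form-data; boundary=----WebKitFormBoundaryX
//
//	------WebKitFormBoundaryX
//	Content-Disposition: form-data; name="file"; filename="demo.mbtiles"
//	Content-Type: application/octet-stream
//
//	<file bytes>
//	------WebKitFormBoundaryX--
//
// ParseFromHead locates the first boundary and its header block;
// ReadToBoundary then streams the file bytes to disk until the next boundary.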
// FileHeader describes one file inside a multipart upload.
type FileHeader struct {
	ContentDisposition string
	Name               string
	FileName           string // file name as sent by the client
	ContentType        string
	ContentLength      int64
}
// ParseFileHeader parses the header block that describes a single file part.
// It returns the parsed FileHeader and whether parsing succeeded.
func ParseFileHeader(h []byte) (FileHeader, bool) {
	arr := bytes.Split(h, []byte("\r\n"))
	var outHeader FileHeader
	outHeader.ContentLength = -1
	const (
		contentDisposition  = "Content-Disposition: "
		namePrefix          = "name=\""
		filenamePrefix      = "filename=\""
		contentTypePrefix   = "Content-Type: "
		contentLengthPrefix = "Content-Length: "
	)
	for _, item := range arr {
		switch {
		case bytes.HasPrefix(item, []byte(contentDisposition)):
			fields := bytes.Split(item[len(contentDisposition):], []byte("; "))
			outHeader.ContentDisposition = string(fields[0])
			if len(fields) > 1 && bytes.HasPrefix(fields[1], []byte(namePrefix)) {
				outHeader.Name = string(fields[1][len(namePrefix) : len(fields[1])-1])
			}
			if len(fields) > 2 {
				l := len(fields[2])
				// the value must end with a closing double quote
				if bytes.HasPrefix(fields[2], []byte(filenamePrefix)) && fields[2][l-1] == '"' {
					outHeader.FileName = string(fields[2][len(filenamePrefix) : l-1])
				}
			}
		case bytes.HasPrefix(item, []byte(contentTypePrefix)):
			outHeader.ContentType = string(item[len(contentTypePrefix):])
		case bytes.HasPrefix(item, []byte(contentLengthPrefix)):
			s := string(item[len(contentLengthPrefix):])
			contentLength, err := strconv.ParseInt(s, 10, 64)
			if err != nil {
				log.Printf("content length error: %s", string(item))
				return outHeader, false
			}
			outHeader.ContentLength = contentLength
		default:
			log.Printf("unknown header line: %s\n", string(item))
		}
	}
	if len(outHeader.FileName) == 0 {
		return outHeader, false
	}
	return outHeader, true
}
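// A minimal example of the header block ParseFileHeader accepts (values
// hypothetical): the input
//
//	Content-Disposition: form-data; name="file"; filename="demo.clt"
//	Content-Type: application/octet-stream
//
// yields FileHeader{ContentDisposition: "form-data", Name: "file",
// FileName: "demo.clt", ContentType: "application/octet-stream",
// ContentLength: -1} and true.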
// ReadToBoundary streams the current file part into target until the next
// boundary is seen.
// It returns the bytes already read that belong to the next part, whether
// the end of the stream was reached, and any error.
func ReadToBoundary(boundary []byte, stream io.ReadCloser, target io.WriteCloser) ([]byte, bool, error) {
	readData := make([]byte, 1024*8)
	readDataLen := 0
	buf := make([]byte, 1024*4)
	bLen := len(boundary)
	reachEnd := false
	for !reachEnd {
		readLen, err := stream.Read(buf)
		if readLen > 0 {
			// Append to a second buffer so the boundary search always sees
			// contiguous data.
			copy(readData[readDataLen:], buf[:readLen])
			readDataLen += readLen
		}
		if err != nil {
			if err != io.EOF {
				return nil, true, err
			}
			reachEnd = true
		}
		if readDataLen < bLen+4 {
			continue
		}
		loc := bytes.Index(readData[:readDataLen], boundary)
		if loc >= 0 {
			// Found the boundary; the 4 bytes before it are the "\r\n--"
			// delimiter prefix, which belongs to the protocol, not the file.
			target.Write(readData[:loc-4])
			return readData[loc:readDataLen], reachEnd, nil
		}
		// No boundary yet: flush everything except a tail long enough to hold
		// a delimiter split across two reads.
		target.Write(readData[:readDataLen-bLen-4])
		copy(readData, readData[readDataLen-bLen-4:readDataLen])
		readDataLen = bLen + 4
	}
	target.Write(readData[:readDataLen])
	return nil, reachEnd, nil
}
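// Sketch of the tail-keeping rule above: with boundary length b, the last
// b+4 bytes of each flush are withheld because a "\r\n--" + boundary sequence
// may straddle two reads. For example, a read that ends with "\r\n--bo" must
// keep those bytes so the next read can complete "undary" and the full
// delimiter can be matched.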
// ParseFromHead parses the header block of one form part.
// readData holds bytes already taken from the stream, readTotal is how many
// of them are valid, boundary is the multipart delimiter (with trailing
// "\r\n" appended by the caller), and stream is the request body.
// It returns the parsed FileHeader, the body bytes that were read past the
// header, and any error.
func ParseFromHead(readData []byte, readTotal int, boundary []byte, stream io.ReadCloser) (FileHeader, []byte, error) {
	buf := make([]byte, 1024*4)
	foundBoundary := false
	boundaryLoc := -1
	var fileHeader FileHeader
	for {
		// Search what is already buffered before reading more, so leftover
		// bytes carried over from the previous part are not ignored.
		if !foundBoundary {
			boundaryLoc = bytes.Index(readData[:readTotal], boundary)
			foundBoundary = boundaryLoc != -1
		}
		if foundBoundary {
			startLoc := boundaryLoc + len(boundary)
			headEnd := bytes.Index(readData[startLoc:readTotal], []byte("\r\n\r\n"))
			if headEnd != -1 {
				headEnd += startLoc
				fh, ok := ParseFileHeader(readData[startLoc:headEnd])
				if !ok {
					return fh, nil, fmt.Errorf("ParseFileHeader fail: %s", string(readData[startLoc:headEnd]))
				}
				// Skip the "\r\n\r\n" separator; everything after it is file body.
				return fh, readData[headEnd+4 : readTotal], nil
			}
		}
		readLen, err := stream.Read(buf)
		if err != nil {
			if err != io.EOF {
				return fileHeader, nil, err
			}
			break
		}
		if readTotal+readLen > cap(readData) {
			return fileHeader, nil, fmt.Errorf("boundary not found within buffer")
		}
		copy(readData[readTotal:], buf[:readLen])
		readTotal += readLen
	}
	return fileHeader, nil, fmt.Errorf("reached stream EOF before file header")
}
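// A hedged end-to-end check (host, port, and prefix are hypothetical,
// assuming the group is mounted at the server root):
//
//	curl -F "file=@demo.mbtiles" "http://127.0.0.1:8000/source/upload?projectId=1"
//
// projectId is read from the query string by SourceUploadFunc, and the
// uploaded file is written to path.Join(globe.SOURCE, "demo.mbtiles").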