use go1.11 with go mod

This commit is contained in:
deepzz0
2018-08-25 18:29:00 +08:00
parent 1d54ff3ac5
commit c6a2439c54
1912 changed files with 102 additions and 498482 deletions
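The new go.mod introduced by this commit does not appear in the excerpt below. As a hedged sketch only, a minimal Go 1.11 module file consistent with the commit title would contain little more than the module path taken from the import statements in the deleted files:

```
module github.com/qiniu/api.v7
```

With GO111MODULE=on, building the package under Go 1.11 then records the required dependencies (for example github.com/qiniu/x, which the code below imports) in this file automatically.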

View File

@@ -1,24 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof

View File

@@ -1,13 +0,0 @@
language: go
go:
- 1.7
- 1.8
- master
env:
global:
- QINIU_KODO_TEST=1
install:
- export QINIU_SRC=$HOME/gopath/src
- mkdir -p $QINIU_SRC/github.com/qiniu
- go get github.com/qiniu/x

View File

@@ -1,56 +0,0 @@
# Changelog
## 7.2.3 (2017-09-25)
* Added the Qiniu authentication scheme
* Removed the prefop domain detection feature
* Exposed the resumable (chunked) upload interfaces to support complex custom business logic
## 7.2.2 (2017-09-19)
* Added proxy support for form upload and resumable upload
* Optimized the crc32 calculation of form upload to reduce memory consumption
* Added Base64 upload for web images
## 7.2.1 (2017-08-20)
* FormUpload now supports crc32 verification by default
* ResumeUpload supports crc32 verification at the API level
## 7.2.0 (2017-07-28)
* Refactored all code of the v7 SDK
## 7.1.0 (2016-06-22)
### Added
* Added multi-region related features
## 7.0.5 (2015-11-20)
### Added
* Added delimiter support to Bucket.List
* Added callback verification
## 7.0.4 (2015-09-03)
### Added
* Added PersistentId to the upload return value PutRet, used to obtain the id of the fop operations triggered by the upload
### Fixed
* Token overwrite issue
## 7.0.3 (2015-07-11)
### Added
* Support NestedObject
## 7.0.2 (2015-07-10)
### Added
* Added cross-bucket file move (Bucket.MoveEx)
## 7.0.1 (2015-07-10)
### Added
* Extended PutPolicy to support MimeLimit, CallbackHost, CallbackFetchKey, CallbackBodyType and Checksum
## 7.0.0 (2016-06-29)
* Refactored; initial release

View File

@@ -1,5 +0,0 @@
test:
go test -v ./auth/...
go test -v ./conf/...
go test -v ./cdn/...
go test -v ./storage/...

View File

@@ -1,20 +0,0 @@
github.com/qiniu/api.v7 (Qiniu Go SDK v7.x)
===============
[![Build Status](https://travis-ci.org/qiniu/api.v7.svg?branch=master)](https://travis-ci.org/qiniu/api.v7) [![GoDoc](https://godoc.org/github.com/qiniu/api.v7?status.svg)](https://godoc.org/github.com/qiniu/api.v7)
[![Qiniu Logo](http://open.qiniudn.com/logo.png)](http://qiniu.com/)
# Installation
```
go get -u github.com/qiniu/api.v7
```
# Documentation
[Qiniu SDK documentation site](https://developer.qiniu.com/kodo/sdk/1238/go) or the [project wiki](https://github.com/qiniu/api.v7/wiki)
# Examples
[Example code](https://github.com/qiniu/api.v7/tree/master/examples)
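A minimal end-to-end upload sketch, using only APIs that appear in the files deleted by this commit (qbox.NewMac, storage.PutPolicy, the form uploader and storage.PutRet); NewFormUploader is assumed to mirror NewFormUploaderEx shown later in this diff, and the environment variables and file path are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"os"

	"github.com/qiniu/api.v7/auth/qbox"
	"github.com/qiniu/api.v7/storage"
)

func main() {
	mac := qbox.NewMac(os.Getenv("QINIU_ACCESS_KEY"), os.Getenv("QINIU_SECRET_KEY"))
	// Upload token scoped to the whole bucket
	putPolicy := storage.PutPolicy{Scope: os.Getenv("QINIU_TEST_BUCKET")}
	upToken := putPolicy.UploadToken(mac)

	// Form upload of a local file (placeholder path)
	formUploader := storage.NewFormUploader(&storage.Config{})
	ret := storage.PutRet{}
	err := formUploader.PutFile(context.Background(), &ret, upToken, "qiniu.png", "/path/to/qiniu.png", &storage.PutExtra{})
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(ret.Key, ret.Hash)
}
```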

View File

@@ -1,2 +0,0 @@
// Package qbox provides the authentication (signing) helpers required by this SDK.
package qbox

View File

@@ -1,152 +0,0 @@
package qbox
import (
"crypto/hmac"
"crypto/sha1"
"encoding/base64"
"fmt"
"io"
"net/http"
"github.com/qiniu/x/bytes.v7/seekable"
)
// Mac holds the Qiniu AccessKey/SecretKey pair. The AK/SK can be obtained from https://portal.qiniu.com/user/key.
type Mac struct {
AccessKey string
SecretKey []byte
}
// NewMac builds a new Mac holding the given AK/SK.
func NewMac(accessKey, secretKey string) (mac *Mac) {
return &Mac{accessKey, []byte(secretKey)}
}
// Sign signs the given data; typically used for private-bucket download URLs.
func (mac *Mac) Sign(data []byte) (token string) {
h := hmac.New(sha1.New, mac.SecretKey)
h.Write(data)
sign := base64.URLEncoding.EncodeToString(h.Sum(nil))
return fmt.Sprintf("%s:%s", mac.AccessKey, sign)
}
// SignWithData signs the given data; typically used to generate upload tokens.
func (mac *Mac) SignWithData(b []byte) (token string) {
encodedData := base64.URLEncoding.EncodeToString(b)
h := hmac.New(sha1.New, mac.SecretKey)
h.Write([]byte(encodedData))
digest := h.Sum(nil)
sign := base64.URLEncoding.EncodeToString(digest)
return fmt.Sprintf("%s:%s:%s", mac.AccessKey, sign, encodedData)
}
// SignRequest signs an HTTP request; typically used to generate management tokens.
func (mac *Mac) SignRequest(req *http.Request) (token string, err error) {
h := hmac.New(sha1.New, mac.SecretKey)
u := req.URL
data := u.Path
if u.RawQuery != "" {
data += "?" + u.RawQuery
}
io.WriteString(h, data+"\n")
if incBody(req) {
s2, err2 := seekable.New(req)
if err2 != nil {
return "", err2
}
h.Write(s2.Bytes())
}
sign := base64.URLEncoding.EncodeToString(h.Sum(nil))
token = fmt.Sprintf("%s:%s", mac.AccessKey, sign)
return
}
// SignRequestV2 signs an HTTP request; typically used to generate advanced management tokens.
func (mac *Mac) SignRequestV2(req *http.Request) (token string, err error) {
h := hmac.New(sha1.New, mac.SecretKey)
u := req.URL
//write method path?query
io.WriteString(h, fmt.Sprintf("%s %s", req.Method, u.Path))
if u.RawQuery != "" {
io.WriteString(h, "?")
io.WriteString(h, u.RawQuery)
}
//write host and port
io.WriteString(h, "\nHost: ")
io.WriteString(h, req.Host)
if req.URL.Port() != "" {
io.WriteString(h, ":")
io.WriteString(h, req.URL.Port())
}
//write content type
contentType := req.Header.Get("Content-Type")
if contentType != "" {
io.WriteString(h, "\n")
io.WriteString(h, fmt.Sprintf("Content-Type: %s", contentType))
}
io.WriteString(h, "\n\n")
//write body
if incBodyV2(req) {
s2, err2 := seekable.New(req)
if err2 != nil {
return "", err2
}
h.Write(s2.Bytes())
}
sign := base64.URLEncoding.EncodeToString(h.Sum(nil))
token = fmt.Sprintf("%s:%s", mac.AccessKey, sign)
return
}
// incBody reports whether the request body should also be signed when generating a management token.
func incBody(req *http.Request) bool {
return req.Body != nil &&
req.Header.Get("Content-Type") == "application/x-www-form-urlencoded"
}
func incBodyV2(req *http.Request) bool {
contentType := req.Header.Get("Content-Type")
return req.Body != nil && (contentType == "application/x-www-form-urlencoded" ||
contentType == "application/json")
}
// VerifyCallback verifies whether an upload callback request really comes from Qiniu.
func (mac *Mac) VerifyCallback(req *http.Request) (bool, error) {
auth := req.Header.Get("Authorization")
if auth == "" {
return false, nil
}
token, err := mac.SignRequest(req)
if err != nil {
return false, err
}
return auth == "QBox "+token, nil
}
// Sign is typically used to sign download tokens.
func Sign(mac *Mac, data []byte) string {
return mac.Sign(data)
}
// SignWithData is typically used to sign upload tokens.
func SignWithData(mac *Mac, data []byte) string {
return mac.SignWithData(data)
}
// VerifyCallback verifies whether an upload callback request really comes from Qiniu.
func VerifyCallback(mac *Mac, req *http.Request) (bool, error) {
return mac.VerifyCallback(req)
}
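A short, hedged usage sketch of the Mac type above: it signs a management request and attaches the token with the "QBox " prefix, mirroring how postRequest in the cdn package below does it; the key values and request URL are placeholders.

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/qiniu/api.v7/auth/qbox"
)

func main() {
	mac := qbox.NewMac("your-access-key", "your-secret-key")

	// Sign a management API request (no body here, so only the path is signed)
	req, err := http.NewRequest("POST", "http://fusion.qiniuapi.com/v2/tune/refresh", nil)
	if err != nil {
		fmt.Println(err)
		return
	}
	token, err := mac.SignRequest(req)
	if err != nil {
		fmt.Println(err)
		return
	}
	req.Header.Add("Authorization", "QBox "+token)
	fmt.Println(req.Header.Get("Authorization"))
}
```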

View File

@@ -1,33 +0,0 @@
package cdn
import (
"crypto/md5"
"fmt"
"net/url"
"time"
)
// CreateTimestampAntileechURL builds an access URL protected by the Qiniu CDN timestamp anti-leech mechanism.
func CreateTimestampAntileechURL(urlStr string, encryptKey string,
durationInSeconds int64) (antileechURL string, err error) {
u, err := url.Parse(urlStr)
if err != nil {
return
}
expireTime := time.Now().Add(time.Second * time.Duration(durationInSeconds)).Unix()
toSignStr := fmt.Sprintf("%s%s%x", encryptKey, u.EscapedPath(), expireTime)
signedStr := fmt.Sprintf("%x", md5.Sum([]byte(toSignStr)))
q := url.Values{}
q.Add("sign", signedStr)
q.Add("t", fmt.Sprintf("%x", expireTime))
if u.RawQuery == "" {
antileechURL = u.String() + "?" + q.Encode()
} else {
antileechURL = u.String() + "&" + q.Encode()
}
return
}

View File

@@ -1,38 +0,0 @@
package cdn
import (
"testing"
)
func TestCreateTimestampAntiLeech(t *testing.T) {
type args struct {
urlStr string
encryptKey string
durationInSeconds int64
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "antileech_1",
args: args{
urlStr: "http://www.example.com/testfile.jpg",
encryptKey: "abc123",
durationInSeconds: 3600,
},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
targetUrl, err := CreateTimestampAntileechURL(tt.args.urlStr, tt.args.encryptKey, tt.args.durationInSeconds)
if (err != nil) != tt.wantErr {
t.Errorf("CreateTimestampAntiLeech() error = %v, wantErr %v", err, tt.wantErr)
return
}
t.Log(targetUrl)
})
}
}

View File

@@ -1,301 +0,0 @@
package cdn
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"strings"
"github.com/qiniu/api.v7/auth/qbox"
)
// FusionHost is the Fusion CDN service host.
var (
FusionHost = "http://fusion.qiniuapi.com"
)
// CdnManager provides refreshing of files and directories, file prefetching, querying domain bandwidth and traffic data, listing domain access logs, and more.
type CdnManager struct {
mac *qbox.Mac
}
// NewCdnManager builds a new CdnManager.
func NewCdnManager(mac *qbox.Mac) *CdnManager {
return &CdnManager{mac: mac}
}
// TrafficReq is the request body for batch bandwidth/traffic queries.
// StartDate is the start date, e.g. 2016-07-01.
// EndDate is the end date, e.g. 2016-07-03.
// Granularity is the data granularity; allowed values are 5min/hour/day.
// Domains is the list of domains, joined with ';'.
type TrafficReq struct {
StartDate string `json:"startDate"`
EndDate string `json:"endDate"`
Granularity string `json:"granularity"`
Domains string `json:"domains"`
}
// TrafficResp is the response body of a bandwidth/traffic query.
type TrafficResp struct {
Code int `json:"code"`
Error string `json:"error"`
Time []string `json:"time,omitempty"`
Data map[string]TrafficData `json:"data,omitempty"`
}
// TrafficData holds bandwidth/traffic data.
type TrafficData struct {
DomainChina []int `json:"china"`
DomainOversea []int `json:"oversea"`
}
// GetBandwidthData queries the access bandwidth data of the given domains.
// startDate    string   required  start date, e.g. 2016-07-01
// endDate      string   required  end date, e.g. 2016-07-03
// granularity  string   required  granularity: 5min, hour or day
// domainList   []string required  list of domains
func (m *CdnManager) GetBandwidthData(startDate, endDate, granularity string,
domainList []string) (bandwidthData TrafficResp, err error) {
domains := strings.Join(domainList, ";")
reqBody := TrafficReq{
StartDate: startDate,
EndDate: endDate,
Granularity: granularity,
Domains: domains,
}
resData, reqErr := postRequest(m.mac, "/v2/tune/bandwidth", reqBody)
if reqErr != nil {
err = reqErr
return
}
umErr := json.Unmarshal(resData, &bandwidthData)
if umErr != nil {
err = umErr
return
}
return
}
// GetFluxData queries the access traffic (flux) data of the given domains.
// startDate    string   required  start date, e.g. 2016-07-01
// endDate      string   required  end date, e.g. 2016-07-03
// granularity  string   required  granularity: 5min, hour or day
// domainList   []string required  list of domains
func (m *CdnManager) GetFluxData(startDate, endDate, granularity string,
domainList []string) (fluxData TrafficResp, err error) {
domains := strings.Join(domainList, ";")
reqBody := TrafficReq{
StartDate: startDate,
EndDate: endDate,
Granularity: granularity,
Domains: domains,
}
resData, reqErr := postRequest(m.mac, "/v2/tune/flux", reqBody)
if reqErr != nil {
err = reqErr
return
}
umErr := json.Unmarshal(resData, &fluxData)
if umErr != nil {
err = umErr
return
}
return
}
// RefreshReq is the request body of a cache refresh.
type RefreshReq struct {
Urls []string `json:"urls"`
Dirs []string `json:"dirs"`
}
// RefreshResp is the response body of a cache refresh.
type RefreshResp struct {
Code int `json:"code"`
Error string `json:"error"`
RequestID string `json:"requestId,omitempty"`
InvalidUrls []string `json:"invalidUrls,omitempty"`
InvalidDirs []string `json:"invalidDirs,omitempty"`
URLQuotaDay int `json:"urlQuotaDay,omitempty"`
URLSurplusDay int `json:"urlSurplusDay,omitempty"`
DirQuotaDay int `json:"dirQuotaDay,omitempty"`
DirSurplusDay int `json:"dirSurplusDay,omitempty"`
}
// RefreshUrlsAndDirs refreshes files and/or directories.
// urls: the individual URLs to refresh, at most 100 per call; each entry is a concrete URL,
// e.g. http://bar.foo.com/index.html
// dirs: the directory URLs to refresh, at most 10 per call; each entry is a directory-level URL,
// e.g. http://bar.foo.com/dir/
func (m *CdnManager) RefreshUrlsAndDirs(urls, dirs []string) (result RefreshResp, err error) {
if len(urls) > 100 {
err = errors.New("urls count exceeds the limit of 100")
return
}
if len(dirs) > 10 {
err = errors.New("dirs count exceeds the limit of 10")
return
}
reqBody := RefreshReq{
Urls: urls,
Dirs: dirs,
}
resData, reqErr := postRequest(m.mac, "/v2/tune/refresh", reqBody)
if reqErr != nil {
err = reqErr
return
}
umErr := json.Unmarshal(resData, &result)
if umErr != nil {
err = umErr
return
}
return
}
// RefreshUrls refreshes files.
func (m *CdnManager) RefreshUrls(urls []string) (result RefreshResp, err error) {
return m.RefreshUrlsAndDirs(urls, nil)
}
// RefreshDirs refreshes directories.
func (m *CdnManager) RefreshDirs(dirs []string) (result RefreshResp, err error) {
return m.RefreshUrlsAndDirs(nil, dirs)
}
// PrefetchReq is the request body of a file prefetch.
type PrefetchReq struct {
Urls []string `json:"urls"`
}
// PrefetchResp is the response body of a file prefetch.
type PrefetchResp struct {
Code int `json:"code"`
Error string `json:"error"`
RequestID string `json:"requestId,omitempty"`
InvalidUrls []string `json:"invalidUrls,omitempty"`
QuotaDay int `json:"quotaDay,omitempty"`
SurplusDay int `json:"surplusDay,omitempty"`
}
// PrefetchUrls prefetches the given file URLs; at most 100 URLs per call.
func (m *CdnManager) PrefetchUrls(urls []string) (result PrefetchResp, err error) {
if len(urls) > 100 {
err = errors.New("urls count exceeds the limit of 100")
return
}
reqBody := PrefetchReq{
Urls: urls,
}
resData, reqErr := postRequest(m.mac, "/v2/tune/prefetch", reqBody)
if reqErr != nil {
err = reqErr
return
}
umErr := json.Unmarshal(resData, &result)
if umErr != nil {
err = umErr
return
}
return
}
// ListLogRequest is the request body of a log download query.
type ListLogRequest struct {
Day string `json:"day"`
Domains string `json:"domains"`
}
// ListLogResult is the response body of a log download query.
type ListLogResult struct {
Code int `json:"code"`
Error string `json:"error"`
Data map[string][]LogDomainInfo `json:"data"`
}
// LogDomainInfo describes one downloadable log file.
type LogDomainInfo struct {
Name string `json:"name"`
Size int64 `json:"size"`
ModifiedTime int64 `json:"mtime"`
URL string `json:"url"`
}
// GetCdnLogList gets the download links of the CDN access logs for the given domains.
func (m *CdnManager) GetCdnLogList(day string, domains []string) (
listLogResult ListLogResult, err error) {
//new log query request
logReq := ListLogRequest{
Day: day,
Domains: strings.Join(domains, ";"),
}
resData, reqErr := postRequest(m.mac, "/v2/tune/log/list", logReq)
if reqErr != nil {
err = fmt.Errorf("get response error, %s", reqErr)
return
}
if decodeErr := json.Unmarshal(resData, &listLogResult); decodeErr != nil {
err = fmt.Errorf("get response error, %s", decodeErr)
return
}
if listLogResult.Error != "" {
err = fmt.Errorf("get log list error, %d %s", listLogResult.Code, listLogResult.Error)
return
}
return
}
// postRequest sends a POST request with a JSON body to the API and returns the response body.
func postRequest(mac *qbox.Mac, path string, body interface{}) (resData []byte,
err error) {
urlStr := fmt.Sprintf("%s%s", FusionHost, path)
reqData, _ := json.Marshal(body)
req, reqErr := http.NewRequest("POST", urlStr, bytes.NewReader(reqData))
if reqErr != nil {
err = reqErr
return
}
accessToken, signErr := mac.SignRequest(req)
if signErr != nil {
err = signErr
return
}
req.Header.Add("Authorization", "QBox "+accessToken)
req.Header.Add("Content-Type", "application/json")
resp, respErr := http.DefaultClient.Do(req)
if respErr != nil {
err = respErr
return
}
defer resp.Body.Close()
resData, ioErr := ioutil.ReadAll(resp.Body)
if ioErr != nil {
err = ioErr
return
}
return
}

View File

@@ -1,247 +0,0 @@
package cdn
import (
"os"
"testing"
"time"
"github.com/qiniu/api.v7/auth/qbox"
)
//global variables
var (
ak = os.Getenv("QINIU_ACCESS_KEY")
sk = os.Getenv("QINIU_SECRET_KEY")
domain = os.Getenv("QINIU_TEST_DOMAIN")
layout = "2006-01-02"
now = time.Now()
startDate = now.AddDate(0, 0, -2).Format(layout)
endDate = now.AddDate(0, 0, -1).Format(layout)
logDate = now.AddDate(0, 0, -1).Format(layout)
testUrls = []string{
"http://gosdk.qiniudn.com/qiniu1.png",
"http://gosdk.qiniudn.com/qiniu2.png",
}
testDirs = []string{
"http://gosdk.qiniudn.com/dir1/",
"http://gosdk.qiniudn.com/dir2/",
}
)
var mac *qbox.Mac
var cdnManager *CdnManager
func init() {
if ak == "" || sk == "" {
panic("please run ./test-env.sh first")
}
mac = qbox.NewMac(ak, sk)
cdnManager = NewCdnManager(mac)
}
//TestGetBandwidthData
func TestGetBandwidthData(t *testing.T) {
type args struct {
startDate string
endDate string
granularity string
domainList []string
}
testCases := []struct {
name string
args args
wantCode int
}{
{
name: "CdnManager_TestGetBandwidthData",
args: args{
startDate,
endDate,
"5min",
[]string{domain},
},
wantCode: 200,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
ret, err := cdnManager.GetBandwidthData(tc.args.startDate, tc.args.endDate,
tc.args.granularity, tc.args.domainList)
if err != nil || ret.Code != tc.wantCode {
t.Errorf("GetBandwidth() error = %v, %v", err, ret.Error)
return
}
})
}
}
//TestGetFluxData
func TestGetFluxData(t *testing.T) {
type args struct {
startDate string
endDate string
granularity string
domainList []string
}
testCases := []struct {
name string
args args
wantCode int
}{
{
name: "CdnManager_TestGetFluxData",
args: args{
startDate,
endDate,
"5min",
[]string{domain},
},
wantCode: 200,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
ret, err := cdnManager.GetFluxData(tc.args.startDate, tc.args.endDate,
tc.args.granularity, tc.args.domainList)
if err != nil || ret.Code != tc.wantCode {
t.Errorf("GetFlux() error = %v, %v", err, ret.Error)
return
}
})
}
}
//TestRefreshUrls
func TestRefreshUrls(t *testing.T) {
type args struct {
urls []string
}
testCases := []struct {
name string
args args
wantCode int
}{
{
name: "CdnManager_TestRefresUrls",
args: args{
urls: testUrls,
},
wantCode: 200,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
ret, err := cdnManager.RefreshUrls(tc.args.urls)
if err != nil || ret.Code != tc.wantCode {
t.Errorf("RefreshUrls() error = %v, %v", err, ret.Error)
return
}
})
}
}
//TestRefreshDirs
func TestRefreshDirs(t *testing.T) {
type args struct {
dirs []string
}
testCases := []struct {
name string
args args
wantCode int
}{
{
name: "CdnManager_TestRefreshDirs",
args: args{
dirs: testDirs,
},
wantCode: 200,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
ret, err := cdnManager.RefreshDirs(tc.args.dirs)
if err != nil || ret.Code != tc.wantCode {
if ret.Error == "refresh dir limit error" {
t.Logf("RefreshDirs() error=%v", ret.Error)
} else {
t.Errorf("RefreshDirs() error = %v, %v", err, ret.Error)
}
return
}
})
}
}
//TestPrefetchUrls
func TestPrefetchUrls(t *testing.T) {
type args struct {
urls []string
}
testCases := []struct {
name string
args args
wantCode int
}{
{
name: "CdnManager_PrefetchUrls",
args: args{
urls: testUrls,
},
wantCode: 200,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
ret, err := cdnManager.PrefetchUrls(tc.args.urls)
if err != nil || ret.Code != tc.wantCode {
t.Errorf("PrefetchUrls() error = %v, %v", err, ret.Error)
return
}
})
}
}
//TestGetCdnLogList
func TestGetCdnLogList(t *testing.T) {
type args struct {
date string
domains []string
}
testCases := []struct {
name string
args args
}{
{
name: "CdnManager_TestGetCdnLogList",
args: args{
date: logDate,
domains: []string{domain},
},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
_, err := cdnManager.GetCdnLogList(tc.args.date, tc.args.domains)
if err != nil {
t.Errorf("GetCdnLogList() error = %v", err)
return
}
})
}
}

View File

@@ -1,3 +0,0 @@
// Package cdn provides common features of Fusion CDN. See https://developer.qiniu.com/fusion for the related documentation.
// It currently supports refreshing files and directories, prefetching files, querying domain bandwidth and traffic data, and listing domain access logs.
package cdn

View File

@@ -1,30 +0,0 @@
package conf
import (
"fmt"
"runtime"
"syscall"
"github.com/qiniu/x/ctype.v7"
"github.com/qiniu/x/rpc.v7"
)
var version = "7.2.3"
const (
ctypeAppName = ctype.ALPHA | ctype.DIGIT | ctype.UNDERLINE | ctype.SPACE_BAR | ctype.SUB | ctype.DOT
)
// SetAppName sets the user application name that is embedded into the User-Agent of API requests; userApp should be [A-Za-z0-9_\ \-\.]*
func SetAppName(userApp string) error {
if userApp != "" && !ctype.IsType(ctypeAppName, userApp) {
return syscall.EINVAL
}
rpc.UserAgent = fmt.Sprintf(
"QiniuGo/%s (%s; %s; %s) %s", version, runtime.GOOS, runtime.GOARCH, userApp, runtime.Version())
return nil
}
func init() {
SetAppName("")
}
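A tiny usage sketch of SetAppName; the app name here is a placeholder that only uses the characters allowed by the check above.

```go
package main

import (
	"fmt"

	"github.com/qiniu/api.v7/conf"
)

func main() {
	// Allowed characters: letters, digits, underscore, space, '-' and '.'
	if err := conf.SetAppName("MyApp-1.0"); err != nil {
		fmt.Println("invalid app name:", err)
	}
}
```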

View File

@@ -1,35 +0,0 @@
package conf
import (
"strings"
"testing"
"github.com/qiniu/x/rpc.v7"
)
func TestUA(t *testing.T) {
err := SetAppName("")
if err != nil {
t.Fatal("expect no error")
}
err = SetAppName("错误的UA")
if err == nil {
t.Fatal("expect an invalid ua format")
}
err = SetAppName("Test0-_.")
if err != nil {
t.Fatal("expect no error")
}
}
func TestFormat(t *testing.T) {
str := "tesT0.-_"
SetAppName(str)
v := rpc.UserAgent
if !strings.Contains(v, str) {
t.Fatal("should include user")
}
if !strings.HasPrefix(v, "QiniuGo/"+version) {
t.Fatal("invalid format")
}
}

View File

@@ -1,2 +0,0 @@
// Package conf provides a way to set the application name. The app name is put into the User-Agent header of API requests, which makes it easier to search logs and analyze problems later.
package conf

View File

@@ -1,19 +0,0 @@
/*
Package github.com/qiniu/api.v7 is version 7.x of the Qiniu Go SDK.
It mainly provides upload, download and management of stored data, as well as CDN-related features. Go >= 1.7.0 is required.
The Go SDK consists of several packages:
the auth package provides authentication helpers, the conf package provides configuration helpers, the cdn package provides CDN features, and the storage package provides storage features.
*/
package api
import (
_ "github.com/qiniu/api.v7/auth/qbox"
_ "github.com/qiniu/api.v7/cdn"
_ "github.com/qiniu/api.v7/conf"
_ "github.com/qiniu/api.v7/storage"
)

View File

@@ -1,35 +0,0 @@
package main
import (
"fmt"
"os"
"github.com/qiniu/api.v7/auth/qbox"
"github.com/qiniu/api.v7/storage"
)
var (
accessKey = os.Getenv("QINIU_ACCESS_KEY")
secretKey = os.Getenv("QINIU_SECRET_KEY")
bucket = os.Getenv("QINIU_TEST_BUCKET")
)
func main() {
cfg := storage.Config{}
mac := qbox.NewMac(accessKey, secretKey)
bucketManager := storage.NewBucketManager(mac, &cfg)
siteURL := "http://devtools.qiniu.com"
// Set the mirror (image) source for the bucket
err := bucketManager.SetImage(siteURL, bucket)
if err != nil {
fmt.Println(err)
}
// Unset the mirror (image) source
err = bucketManager.UnsetImage(bucket)
if err != nil {
fmt.Println(err)
}
}

View File

@@ -1,20 +0,0 @@
package main
import (
"fmt"
"time"
"github.com/qiniu/api.v7/cdn"
)
func main() {
urlStr := "http://image.example.com/qiniu_do_not_delete.gif"
cryptKey := "your crypt key"
// CreateTimestampAntileechURL expects a validity duration in seconds, not an absolute deadline
durationInSeconds := int64(3600)
accessUrl, err := cdn.CreateTimestampAntileechURL(urlStr, cryptKey, durationInSeconds)
if err != nil {
fmt.Println(err)
return
}
fmt.Println(accessUrl)
}

View File

@@ -1,31 +0,0 @@
package main
import (
"fmt"
"os"
"github.com/qiniu/api.v7/auth/qbox"
"github.com/qiniu/api.v7/cdn"
)
var (
accessKey = os.Getenv("QINIU_ACCESS_KEY")
secretKey = os.Getenv("QINIU_SECRET_KEY")
domain = os.Getenv("QINIU_TEST_DOMAIN")
)
func main() {
mac := qbox.NewMac(accessKey, secretKey)
cdnManager := cdn.NewCdnManager(mac)
startDate := "2017-07-20"
endDate := "2017-07-30"
g := "day"
data, err := cdnManager.GetBandwidthData(startDate, endDate, g, []string{domain})
if err != nil {
fmt.Println(err)
return
}
fmt.Printf("%v\n", data)
}

View File

@@ -1,31 +0,0 @@
package main
import (
"fmt"
"os"
"github.com/qiniu/api.v7/auth/qbox"
"github.com/qiniu/api.v7/cdn"
)
var (
accessKey = os.Getenv("QINIU_ACCESS_KEY")
secretKey = os.Getenv("QINIU_SECRET_KEY")
domain = os.Getenv("QINIU_TEST_DOMAIN")
)
func main() {
mac := qbox.NewMac(accessKey, secretKey)
cdnManager := cdn.NewCdnManager(mac)
startDate := "2017-07-20"
endDate := "2017-07-30"
g := "day"
data, err := cdnManager.GetFluxData(startDate, endDate, g, []string{domain})
if err != nil {
fmt.Println(err)
return
}
fmt.Printf("%v\n", data)
}

View File

@@ -1,37 +0,0 @@
package main
import (
"fmt"
"os"
"github.com/qiniu/api.v7/auth/qbox"
"github.com/qiniu/api.v7/cdn"
)
var (
accessKey = os.Getenv("QINIU_ACCESS_KEY")
secretKey = os.Getenv("QINIU_SECRET_KEY")
domain = os.Getenv("QINIU_TEST_DOMAIN")
)
func main() {
mac := qbox.NewMac(accessKey, secretKey)
cdnManager := cdn.NewCdnManager(mac)
domains := []string{
domain,
}
day := "2017-07-30"
ret, err := cdnManager.GetCdnLogList(day, domains)
if err != nil {
fmt.Println(err)
return
}
domainLogs := ret.Data
for domain, logs := range domainLogs {
fmt.Println(domain)
for _, item := range logs {
fmt.Println(item.Name, item.URL, item.Size, item.ModifiedTime)
}
}
}

View File

@@ -1,33 +0,0 @@
package main
import (
"fmt"
"os"
"github.com/qiniu/api.v7/auth/qbox"
"github.com/qiniu/api.v7/cdn"
)
var (
accessKey = os.Getenv("QINIU_ACCESS_KEY")
secretKey = os.Getenv("QINIU_SECRET_KEY")
domain = os.Getenv("QINIU_TEST_DOMAIN")
)
func main() {
mac := qbox.NewMac(accessKey, secretKey)
cdnManager := cdn.NewCdnManager(mac)
// Prefetch URLs; at most 100 URLs per request, send them in batches if you have more
urlsToPrefetch := []string{
"http://if-pbl.qiniudn.com/qiniu.png",
"http://if-pbl.qiniudn.com/github.png",
}
ret, err := cdnManager.PrefetchUrls(urlsToPrefetch)
if err != nil {
fmt.Println(err)
return
}
fmt.Println(ret.Code)
fmt.Println(ret.RequestID)
}

View File

@@ -1,48 +0,0 @@
package main
import (
"fmt"
"os"
"github.com/qiniu/api.v7/auth/qbox"
"github.com/qiniu/api.v7/cdn"
)
var (
accessKey = os.Getenv("QINIU_ACCESS_KEY")
secretKey = os.Getenv("QINIU_SECRET_KEY")
domain = os.Getenv("QINIU_TEST_DOMAIN")
)
func main() {
mac := qbox.NewMac(accessKey, secretKey)
cdnManager := cdn.NewCdnManager(mac)
// Refresh URLs; at most 100 URLs per request, send them in batches if you have more
urlsToRefresh := []string{
"http://if-pbl.qiniudn.com/qiniu.png",
"http://if-pbl.qiniudn.com/github.png",
}
ret, err := cdnManager.RefreshUrls(urlsToRefresh)
if err != nil {
fmt.Println(err)
return
}
fmt.Println(ret.Code)
fmt.Println(ret.RequestID)
// Refresh directories; directory refresh requires contacting Qiniu technical support to enable the permission
// At most 10 directories per request; send them in batches if you have more
dirsToRefresh := []string{
"http://if-pbl.qiniudn.com/images/",
"http://if-pbl.qiniudn.com/static/",
}
ret, err = cdnManager.RefreshDirs(dirsToRefresh)
if err != nil {
fmt.Println(err)
return
}
fmt.Println(ret.Code)
fmt.Println(ret.RequestID)
fmt.Println(ret.Error)
}

View File

@@ -1,91 +0,0 @@
package main
import (
"encoding/base64"
"fmt"
"os"
"strings"
"github.com/qiniu/api.v7/auth/qbox"
"github.com/qiniu/api.v7/storage"
)
var (
accessKey = os.Getenv("QINIU_ACCESS_KEY")
secretKey = os.Getenv("QINIU_SECRET_KEY")
bucket = os.Getenv("QINIU_TEST_BUCKET")
)
func main() {
// Simple upload token
putPolicy := storage.PutPolicy{
Scope: bucket,
}
mac := qbox.NewMac(accessKey, secretKey)
upToken := putPolicy.UploadToken(mac)
fmt.Println(upToken)
// Upload token with a custom expiration
putPolicy = storage.PutPolicy{
Scope: bucket,
}
putPolicy.Expires = 7200 // e.g. valid for 2 hours
upToken = putPolicy.UploadToken(mac)
fmt.Println(upToken)
// Overwrite upload token
// Key of the file to overwrite
keyToOverwrite := "qiniu.mp4"
putPolicy = storage.PutPolicy{
Scope: fmt.Sprintf("%s:%s", bucket, keyToOverwrite),
}
upToken = putPolicy.UploadToken(mac)
fmt.Println(upToken)
// Upload token with a custom response body (ReturnBody)
putPolicy = storage.PutPolicy{
Scope: bucket,
ReturnBody: `{"key":"$(key)","hash":"$(etag)","fsize":$(fsize),"bucket":"$(bucket)","name":"$(x:name)"}`,
}
upToken = putPolicy.UploadToken(mac)
fmt.Println(upToken)
// Upload token with a callback to the business server (JSON body)
putPolicy = storage.PutPolicy{
Scope: bucket,
CallbackURL: "http://api.example.com/qiniu/upload/callback",
CallbackBody: `{"key":"$(key)","hash":"$(etag)","fsize":$(fsize),"bucket":"$(bucket)","name":"$(x:name)"}`,
CallbackBodyType: "application/json",
}
upToken = putPolicy.UploadToken(mac)
fmt.Println(upToken)
// Upload token with a callback to the business server (URL-encoded form body)
putPolicy = storage.PutPolicy{
Scope: bucket,
CallbackURL: "http://api.example.com/qiniu/upload/callback",
CallbackBody: "key=$(key)&hash=$(etag)&bucket=$(bucket)&fsize=$(fsize)&name=$(x:name)",
}
upToken = putPolicy.UploadToken(mac)
fmt.Println(upToken)
// Upload token with persistent data processing (pfop)
saveMp4Entry := base64.URLEncoding.EncodeToString([]byte(bucket + ":avthumb_test_target.mp4"))
saveJpgEntry := base64.URLEncoding.EncodeToString([]byte(bucket + ":vframe_test_target.jpg"))
// Data-processing commands (fops); multiple fops are supported
avthumbMp4Fop := "avthumb/mp4|saveas/" + saveMp4Entry
vframeJpgFop := "vframe/jpg/offset/1|saveas/" + saveJpgEntry
// Join multiple fops
persistentOps := strings.Join([]string{avthumbMp4Fop, vframeJpgFop}, ";")
pipeline := "test"
putPolicy = storage.PutPolicy{
Scope: bucket,
PersistentOps: persistentOps,
PersistentPipeline: pipeline,
PersistentNotifyURL: "http://api.example.com/qiniu/pfop/notify",
}
upToken = putPolicy.UploadToken(mac)
fmt.Println(upToken)
}

View File

@@ -1,76 +0,0 @@
package main
import (
"context"
"fmt"
"net"
"os"
"net/http"
"net/url"
"github.com/qiniu/api.v7/auth/qbox"
"github.com/qiniu/api.v7/storage"
"github.com/qiniu/x/rpc.v7"
)
var (
accessKey = os.Getenv("QINIU_ACCESS_KEY")
secretKey = os.Getenv("QINIU_SECRET_KEY")
bucket = os.Getenv("QINIU_TEST_BUCKET")
)
func main() {
localFile := "/Users/jemy/Documents/github.png"
key := "github-x.png"
putPolicy := storage.PutPolicy{
Scope: bucket + ":" + key,
}
mac := qbox.NewMac(accessKey, secretKey)
upToken := putPolicy.UploadToken(mac)
cfg := storage.Config{}
// Region (zone) of the bucket
cfg.Zone = &storage.ZoneHuadong
// Whether to use HTTPS domains
cfg.UseHTTPS = false
// Whether to use CDN-accelerated upload
cfg.UseCdnDomains = false
// Configure a proxy
proxyURL := "http://localhost:8888"
proxyURI, _ := url.Parse(proxyURL)
// Bind to a specific network interface
nicIP := "100.100.33.138"
dialer := &net.Dialer{
LocalAddr: &net.TCPAddr{
IP: net.ParseIP(nicIP),
},
}
// Build an http.Client that uses the proxy and the bound dialer
client := http.Client{
Transport: &http.Transport{
Proxy: http.ProxyURL(proxyURI),
Dial: dialer.Dial,
},
}
// Build the form uploader
formUploader := storage.NewFormUploaderEx(&cfg, &rpc.Client{Client: &client})
ret := storage.PutRet{}
// Optional settings
putExtra := storage.PutExtra{
Params: map[string]string{
"x:name": "github logo",
},
}
//putExtra.NoCrc32Check = true
err := formUploader.PutFile(context.Background(), &ret, upToken, key, localFile, &putExtra)
if err != nil {
fmt.Println(err)
return
}
fmt.Println(ret.Key, ret.Hash)
}

View File

@@ -1,32 +0,0 @@
package main
import (
"fmt"
"os"
"github.com/qiniu/api.v7/auth/qbox"
"github.com/qiniu/api.v7/storage"
)
var (
accessKey = os.Getenv("QINIU_ACCESS_KEY")
secretKey = os.Getenv("QINIU_SECRET_KEY")
)
func main() {
mac := qbox.NewMac(accessKey, secretKey)
cfg := storage.Config{
UseHTTPS: false,
}
// Specify the region of the bucket; if not set, it is detected automatically
// In most cases there is no need to set it
//cfg.Zone=&storage.ZoneHuabei
operationManager := storage.NewOperationManager(mac, &cfg)
persistentId := "z0.597f28b445a2650c994bb208"
ret, err := operationManager.Prefop(persistentId)
if err != nil {
fmt.Println(err)
return
}
fmt.Println(ret.String())
}

View File

@@ -1,127 +0,0 @@
package main
import (
"crypto/md5"
"encoding/hex"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sync"
"context"
"github.com/qiniu/api.v7/auth/qbox"
"github.com/qiniu/api.v7/storage"
)
var (
accessKey = os.Getenv("QINIU_ACCESS_KEY")
secretKey = os.Getenv("QINIU_SECRET_KEY")
bucket = os.Getenv("QINIU_TEST_BUCKET")
)
func md5Hex(str string) string {
h := md5.New()
h.Write([]byte(str))
return hex.EncodeToString(h.Sum(nil))
}
type ProgressRecord struct {
Progresses []storage.BlkputRet `json:"progresses"`
}
func main() {
localFile := "your local file path"
key := "your file save key"
putPolicy := storage.PutPolicy{
Scope: bucket,
}
mac := qbox.NewMac(accessKey, secretKey)
upToken := putPolicy.UploadToken(mac)
cfg := storage.Config{}
// Region (zone) of the bucket
cfg.Zone = &storage.ZoneHuadong
// Whether to use HTTPS domains
cfg.UseHTTPS = false
// Whether to use CDN-accelerated upload
cfg.UseCdnDomains = false
// Carefully choose a recordKey that uniquely identifies the upload, to record its progress
// Here we use md5(bucket+key+local_path+local_file_last_modified)+".progress" as the progress file name
fileInfo, statErr := os.Stat(localFile)
if statErr != nil {
fmt.Println(statErr)
return
}
fileSize := fileInfo.Size()
fileLmd := fileInfo.ModTime().UnixNano()
recordKey := md5Hex(fmt.Sprintf("%s:%s:%s:%d", bucket, key, localFile, fileLmd)) + ".progress"
// Directory for progress files; in practice make sure it exists and is used only for progress files
recordDir := "/Users/jemy/Temp/progress"
mErr := os.MkdirAll(recordDir, 0755)
if mErr != nil {
fmt.Println("mkdir for record dir error,", mErr)
return
}
recordPath := filepath.Join(recordDir, recordKey)
progressRecord := ProgressRecord{}
// Try to read progress from an existing progress file
recordFp, openErr := os.Open(recordPath)
if openErr == nil {
progressBytes, readErr := ioutil.ReadAll(recordFp)
if readErr == nil {
mErr := json.Unmarshal(progressBytes, &progressRecord)
if mErr == nil {
// Check whether any upload context has expired, to avoid 701 errors
for _, item := range progressRecord.Progresses {
if storage.IsContextExpired(item) {
fmt.Println(item.ExpiredAt)
progressRecord.Progresses = make([]storage.BlkputRet, storage.BlockCount(fileSize))
break
}
}
}
}
recordFp.Close()
}
if len(progressRecord.Progresses) == 0 {
progressRecord.Progresses = make([]storage.BlkputRet, storage.BlockCount(fileSize))
}
resumeUploader := storage.NewResumeUploader(&cfg)
ret := storage.PutRet{}
progressLock := sync.RWMutex{}
putExtra := storage.RputExtra{
Progresses: progressRecord.Progresses,
Notify: func(blkIdx int, blkSize int, ret *storage.BlkputRet) {
progressLock.Lock()
defer progressLock.Unlock()
// Serialize the progress and write it to the file
progressRecord.Progresses[blkIdx] = *ret
progressBytes, _ := json.Marshal(progressRecord)
fmt.Println("write progress file", blkIdx, recordPath)
wErr := ioutil.WriteFile(recordPath, progressBytes, 0644)
if wErr != nil {
fmt.Println("write progress file error,", wErr)
}
},
}
err := resumeUploader.PutFile(context.Background(), &ret, upToken, key, localFile, &putExtra)
if err != nil {
fmt.Println(err)
return
}
// After a successful upload, remember to delete the progress file
os.Remove(recordPath)
fmt.Println(ret.Key, ret.Hash)
}

View File

@@ -1,72 +0,0 @@
package main
import (
"context"
"fmt"
"net"
"net/http"
"net/url"
"os"
"github.com/qiniu/api.v7/auth/qbox"
"github.com/qiniu/api.v7/storage"
"github.com/qiniu/x/rpc.v7"
)
var (
accessKey = os.Getenv("QINIU_ACCESS_KEY")
secretKey = os.Getenv("QINIU_SECRET_KEY")
bucket = os.Getenv("QINIU_TEST_BUCKET")
)
func main() {
localFile := "/Users/jemy/Documents/github.png"
key := "qiniu-x.png"
putPolicy := storage.PutPolicy{
Scope: bucket,
}
mac := qbox.NewMac(accessKey, secretKey)
cfg := storage.Config{}
// Region (zone) of the bucket
cfg.Zone = &storage.ZoneHuadong
// Whether to use HTTPS domains
cfg.UseHTTPS = false
// Whether to use CDN-accelerated upload
cfg.UseCdnDomains = false
// Configure a proxy
proxyURL := "http://localhost:8888"
proxyURI, _ := url.Parse(proxyURL)
// Bind to a specific network interface
nicIP := "100.100.33.138"
dialer := &net.Dialer{
LocalAddr: &net.TCPAddr{
IP: net.ParseIP(nicIP),
},
}
// Build an http.Client that uses the proxy and the bound dialer
client := http.Client{
Transport: &http.Transport{
Proxy: http.ProxyURL(proxyURI),
Dial: dialer.Dial,
},
}
resumeUploader := storage.NewResumeUploaderEx(&cfg, &rpc.Client{Client: &client})
upToken := putPolicy.UploadToken(mac)
ret := storage.PutRet{}
err := resumeUploader.PutFile(context.Background(), &ret, upToken, key, localFile, nil)
if err != nil {
fmt.Println(err)
return
}
fmt.Println(ret.Key, ret.Hash)
}

View File

@@ -1,64 +0,0 @@
package main
import (
"fmt"
"os"
"github.com/qiniu/api.v7/auth/qbox"
"github.com/qiniu/api.v7/storage"
"github.com/qiniu/x/rpc.v7"
)
var (
accessKey = os.Getenv("QINIU_ACCESS_KEY")
secretKey = os.Getenv("QINIU_SECRET_KEY")
bucket = os.Getenv("QINIU_TEST_BUCKET")
)
func main() {
mac := qbox.NewMac(accessKey, secretKey)
cfg := storage.Config{
// Whether to use HTTPS domains for resource management
UseHTTPS: false,
}
// Specify the region of the bucket; if not set, it is detected automatically
// In most cases there is no need to set it
//cfg.Zone=&storage.ZoneHuabei
bucketManager := storage.NewBucketManager(mac, &cfg)
chgmKeys := map[string]string{
"github1.png": "image/x-png",
"github2.png": "image/x-png",
"github3.png": "image/x-png",
"github4.png": "image/x-png",
"github5.png": "image/x-png",
}
chgmOps := make([]string, 0, len(chgmKeys))
for key, newMime := range chgmKeys {
chgmOps = append(chgmOps, storage.URIChangeMime(bucket, key, newMime))
}
rets, err := bucketManager.Batch(chgmOps)
if err != nil {
// The batch request returned an error
if _, ok := err.(*rpc.ErrorInfo); ok {
for _, ret := range rets {
// 200 means success
fmt.Printf("%d\n", ret.Code)
if ret.Code != 200 {
fmt.Printf("%s\n", ret.Data.Error)
}
}
} else {
fmt.Printf("batch error, %s", err)
}
} else {
// All operations succeeded
for _, ret := range rets {
// 200 means success
fmt.Printf("%d\n", ret.Code)
if ret.Code != 200 {
fmt.Printf("%s\n", ret.Data.Error)
}
}
}
}

View File

@@ -1,67 +0,0 @@
package main
import (
"fmt"
"os"
"github.com/qiniu/api.v7/auth/qbox"
"github.com/qiniu/api.v7/storage"
"github.com/qiniu/x/rpc.v7"
)
var (
accessKey = os.Getenv("QINIU_ACCESS_KEY")
secretKey = os.Getenv("QINIU_SECRET_KEY")
bucket = os.Getenv("QINIU_TEST_BUCKET")
)
func main() {
mac := qbox.NewMac(accessKey, secretKey)
cfg := storage.Config{
// Whether to use HTTPS domains for resource management
UseHTTPS: false,
}
// Specify the region of the bucket; if not set, it is detected automatically
// In most cases there is no need to set it
//cfg.Zone=&storage.ZoneHuabei
bucketManager := storage.NewBucketManager(mac, &cfg)
// Each batch may contain at most 1000 operations; if you have more, send them in batches
chtypeKeys := map[string]int{
"github1.png": 1,
"github2.png": 1,
"github3.png": 1,
"github4.png": 1,
"github5.png": 1,
}
chtypeOps := make([]string, 0, len(chtypeKeys))
for key, fileType := range chtypeKeys {
chtypeOps = append(chtypeOps, storage.URIChangeType(bucket, key, fileType))
}
rets, err := bucketManager.Batch(chtypeOps)
if err != nil {
// The batch request returned an error
if _, ok := err.(*rpc.ErrorInfo); ok {
for _, ret := range rets {
// 200 means success
fmt.Printf("%d\n", ret.Code)
if ret.Code != 200 {
fmt.Printf("%s\n", ret.Data.Error)
}
}
} else {
fmt.Printf("batch error, %s", err)
}
} else {
// All operations succeeded
for _, ret := range rets {
// 200 means success
fmt.Printf("%d\n", ret.Code)
if ret.Code != 200 {
fmt.Printf("%s\n", ret.Data.Error)
}
}
}
}

View File

@@ -1,67 +0,0 @@
package main
import (
"fmt"
"os"
"github.com/qiniu/api.v7/auth/qbox"
"github.com/qiniu/api.v7/storage"
"github.com/qiniu/x/rpc.v7"
)
var (
accessKey = os.Getenv("QINIU_ACCESS_KEY")
secretKey = os.Getenv("QINIU_SECRET_KEY")
bucket = os.Getenv("QINIU_TEST_BUCKET")
)
func main() {
mac := qbox.NewMac(accessKey, secretKey)
cfg := storage.Config{
// Whether to use HTTPS domains for resource management
UseHTTPS: false,
}
// Specify the region of the bucket; if not set, it is detected automatically
// In most cases there is no need to set it
//cfg.Zone=&storage.ZoneHuabei
bucketManager := storage.NewBucketManager(mac, &cfg)
// Each batch may contain at most 1000 operations; if you have more, send them in batches
srcBucket := bucket
destBucket := bucket
force := true
copyKeys := map[string]string{
"github1.png": "github1-copy.png",
"github2.png": "github2-copy.png",
"github3.png": "github3-copy.png",
"github4.png": "github4-copy.png",
"github5.png": "github5-copy.png",
}
copyOps := make([]string, 0, len(copyKeys))
for srcKey, destKey := range copyKeys {
copyOps = append(copyOps, storage.URICopy(srcBucket, srcKey, destBucket, destKey, force))
}
rets, err := bucketManager.Batch(copyOps)
if err != nil {
// The batch request returned an error
if _, ok := err.(*rpc.ErrorInfo); ok {
for _, ret := range rets {
// 200 means success
fmt.Printf("%d\n", ret.Code)
if ret.Code != 200 {
fmt.Printf("%s\n", ret.Data.Error)
}
}
} else {
fmt.Printf("batch error, %s", err)
}
} else {
// All operations succeeded
for _, ret := range rets {
// 200 means success
fmt.Printf("%d\n", ret.Code)
fmt.Printf("%v\n", ret.Data)
}
}
}

View File

@@ -1,63 +0,0 @@
package main
import (
"fmt"
"os"
"github.com/qiniu/api.v7/auth/qbox"
"github.com/qiniu/api.v7/storage"
"github.com/qiniu/x/rpc.v7"
)
var (
accessKey = os.Getenv("QINIU_ACCESS_KEY")
secretKey = os.Getenv("QINIU_SECRET_KEY")
bucket = os.Getenv("QINIU_TEST_BUCKET")
)
func main() {
mac := qbox.NewMac(accessKey, secretKey)
cfg := storage.Config{
// Whether to use HTTPS domains for resource management
UseHTTPS: false,
}
// Specify the region of the bucket; if not set, it is detected automatically
// In most cases there is no need to set it
//cfg.Zone=&storage.ZoneHuabei
bucketManager := storage.NewBucketManager(mac, &cfg)
// Each batch may contain at most 1000 operations; if you have more, send them in batches
keys := []string{
"github1.png",
"github2.png",
"github3.png",
"github4.png",
"github5.png",
}
deleteOps := make([]string, 0, len(keys))
for _, key := range keys {
deleteOps = append(deleteOps, storage.URIDelete(bucket, key))
}
rets, err := bucketManager.Batch(deleteOps)
if err != nil {
// The batch request returned an error
if _, ok := err.(*rpc.ErrorInfo); ok {
for _, ret := range rets {
// 200 means success
fmt.Printf("%d\n", ret.Code)
if ret.Code != 200 {
fmt.Printf("%s\n", ret.Data.Error)
}
}
} else {
fmt.Printf("batch error, %s", err)
}
} else {
// All operations succeeded
for _, ret := range rets {
// 200 means success
fmt.Printf("%d\n", ret.Code)
}
}
}

View File

@@ -1,66 +0,0 @@
package main
import (
"fmt"
"os"
"github.com/qiniu/api.v7/auth/qbox"
"github.com/qiniu/api.v7/storage"
"github.com/qiniu/x/rpc.v7"
)
var (
accessKey = os.Getenv("QINIU_ACCESS_KEY")
secretKey = os.Getenv("QINIU_SECRET_KEY")
bucket = os.Getenv("QINIU_TEST_BUCKET")
)
func main() {
mac := qbox.NewMac(accessKey, secretKey)
cfg := storage.Config{
// Whether to use HTTPS domains for resource management
UseHTTPS: false,
}
// Specify the region of the bucket; if not set, it is detected automatically
// In most cases there is no need to set it
//cfg.Zone=&storage.ZoneHuabei
bucketManager := storage.NewBucketManager(mac, &cfg)
// Each batch may contain at most 1000 operations; if you have more, send them in batches
expireKeys := map[string]int{
"github1.png": 7,
"github2.png": 8,
"github3.png": 9,
"github4.png": 10,
"github5.png": 11,
}
expireOps := make([]string, 0, len(expireKeys))
for key, expire := range expireKeys {
expireOps = append(expireOps, storage.URIDeleteAfterDays(bucket, key, expire))
}
rets, err := bucketManager.Batch(expireOps)
if err != nil {
// The batch request returned an error
if _, ok := err.(*rpc.ErrorInfo); ok {
for _, ret := range rets {
// 200 means success
fmt.Printf("%d\n", ret.Code)
if ret.Code != 200 {
fmt.Printf("%s\n", ret.Data.Error)
}
}
} else {
fmt.Printf("batch error, %s", err)
}
} else {
// All operations succeeded
for _, ret := range rets {
// 200 means success
fmt.Printf("%d\n", ret.Code)
if ret.Code != 200 {
fmt.Printf("%s\n", ret.Data.Error)
}
}
}
}

View File

@@ -1,67 +0,0 @@
package main
import (
"fmt"
"os"
"github.com/qiniu/api.v7/auth/qbox"
"github.com/qiniu/api.v7/storage"
"github.com/qiniu/x/rpc.v7"
)
var (
accessKey = os.Getenv("QINIU_ACCESS_KEY")
secretKey = os.Getenv("QINIU_SECRET_KEY")
bucket = os.Getenv("QINIU_TEST_BUCKET")
)
func main() {
mac := qbox.NewMac(accessKey, secretKey)
cfg := storage.Config{
// Whether to use HTTPS domains for resource management
UseHTTPS: false,
}
// Specify the region of the bucket; if not set, it is detected automatically
// In most cases there is no need to set it
//cfg.Zone=&storage.ZoneHuabei
bucketManager := storage.NewBucketManager(mac, &cfg)
// Each batch may contain at most 1000 operations; if you have more, send them in batches
srcBucket := bucket
destBucket := bucket
force := true
moveKeys := map[string]string{
"github1.png": "github1-move.png",
"github2.png": "github2-move.png",
"github3.png": "github3-move.png",
"github4.png": "github4-move.png",
"github5.png": "github5-move.png",
}
moveOps := make([]string, 0, len(moveKeys))
for srcKey, destKey := range moveKeys {
moveOps = append(moveOps, storage.URIMove(srcBucket, srcKey, destBucket, destKey, force))
}
rets, err := bucketManager.Batch(moveOps)
if err != nil {
// The batch request returned an error
if _, ok := err.(*rpc.ErrorInfo); ok {
for _, ret := range rets {
// 200 means success
fmt.Printf("%d\n", ret.Code)
if ret.Code != 200 {
fmt.Printf("%s\n", ret.Data.Error)
}
}
} else {
fmt.Printf("batch error, %s", err)
}
} else {
// All operations succeeded
for _, ret := range rets {
// 200 means success
fmt.Printf("%d\n", ret.Code)
fmt.Printf("%v\n", ret.Data)
}
}
}

View File

@@ -1,66 +0,0 @@
package main
import (
"fmt"
"os"
"github.com/qiniu/api.v7/auth/qbox"
"github.com/qiniu/api.v7/storage"
"github.com/qiniu/x/rpc.v7"
)
var (
accessKey = os.Getenv("QINIU_ACCESS_KEY")
secretKey = os.Getenv("QINIU_SECRET_KEY")
bucket = os.Getenv("QINIU_TEST_BUCKET")
)
func main() {
mac := qbox.NewMac(accessKey, secretKey)
cfg := storage.Config{
// Whether to use HTTPS domains for resource management
UseHTTPS: false,
}
// Specify the region of the bucket; if not set, it is detected automatically
// In most cases there is no need to set it
//cfg.Zone=&storage.ZoneHuabei
bucketManager := storage.NewBucketManager(mac, &cfg)
// Each batch may contain at most 1000 operations; if you have more, send them in batches
keys := []string{
"github1.png",
"github2.png",
"github3.png",
"github4.png",
"github5.png",
}
statOps := make([]string, 0, len(keys))
for _, key := range keys {
statOps = append(statOps, storage.URIStat(bucket, key))
}
rets, err := bucketManager.Batch(statOps)
if err != nil {
// The batch request returned an error
if _, ok := err.(*rpc.ErrorInfo); ok {
for _, ret := range rets {
// 200 means success
fmt.Printf("%d\n", ret.Code)
if ret.Code != 200 {
fmt.Printf("%s\n", ret.Data.Error)
} else {
fmt.Printf("%v\n", ret.Data)
}
}
} else {
fmt.Printf("batch error, %s", err)
}
} else {
// All operations succeeded
for _, ret := range rets {
// 200 means success
fmt.Printf("%d\n", ret.Code)
fmt.Printf("%v\n", ret.Data)
}
}
}

View File

@@ -1,36 +0,0 @@
package main
import (
"fmt"
"os"
"github.com/qiniu/api.v7/auth/qbox"
"github.com/qiniu/api.v7/storage"
)
var (
accessKey = os.Getenv("QINIU_ACCESS_KEY")
secretKey = os.Getenv("QINIU_SECRET_KEY")
bucket = os.Getenv("QINIU_TEST_BUCKET")
)
func main() {
mac := qbox.NewMac(accessKey, secretKey)
cfg := storage.Config{
// Whether to use HTTPS domains for resource management
UseHTTPS: false,
}
// Specify the region of the bucket; if not set, it is detected automatically
// In most cases there is no need to set it
//cfg.Zone=&storage.ZoneHuabei
bucketManager := storage.NewBucketManager(mac, &cfg)
key := "github.png"
newMime := "image/x-png"
err := bucketManager.ChangeMime(bucket, key, newMime)
if err != nil {
fmt.Println(err)
return
}
}

View File

@@ -1,36 +0,0 @@
package main
import (
"fmt"
"os"
"github.com/qiniu/api.v7/auth/qbox"
"github.com/qiniu/api.v7/storage"
)
var (
accessKey = os.Getenv("QINIU_ACCESS_KEY")
secretKey = os.Getenv("QINIU_SECRET_KEY")
bucket = os.Getenv("QINIU_TEST_BUCKET")
)
func main() {
mac := qbox.NewMac(accessKey, secretKey)
cfg := storage.Config{
// Whether to use HTTPS domains for resource management
UseHTTPS: false,
}
// Specify the region of the bucket; if not set, it is detected automatically
// In most cases there is no need to set it
//cfg.Zone=&storage.ZoneHuabei
bucketManager := storage.NewBucketManager(mac, &cfg)
key := "github.png"
fileType := 1 // 0 for standard storage, 1 for infrequent-access storage
err := bucketManager.ChangeType(bucket, key, fileType)
if err != nil {
fmt.Println(err)
return
}
}

View File

@@ -1,42 +0,0 @@
package main
import (
"fmt"
"os"
"github.com/qiniu/api.v7/auth/qbox"
"github.com/qiniu/api.v7/storage"
)
var (
accessKey = os.Getenv("QINIU_ACCESS_KEY")
secretKey = os.Getenv("QINIU_SECRET_KEY")
bucket = os.Getenv("QINIU_TEST_BUCKET")
)
func main() {
mac := qbox.NewMac(accessKey, secretKey)
cfg := storage.Config{
// Whether to use HTTPS domains for resource management
UseHTTPS: false,
}
// Specify the region of the bucket; if not set, it is detected automatically
// In most cases there is no need to set it
//cfg.Zone=&storage.ZoneHuabei
bucketManager := storage.NewBucketManager(mac, &cfg)
srcBucket := "if-pbl"
srcKey := "github.png"
// The destination bucket may be the same as the source bucket, but must not be in a different region
destBucket := srcBucket
// The destination key may be the same as or different from the source key
destKey := "github-new.png"
// Whether to overwrite an existing destination file; if not, the server returns 614 file exists by default
force := false
err := bucketManager.Copy(srcBucket, srcKey, destBucket, destKey, force)
if err != nil {
fmt.Println(err)
return
}
}

View File

@@ -1,35 +0,0 @@
package main
import (
"fmt"
"os"
"github.com/qiniu/api.v7/auth/qbox"
"github.com/qiniu/api.v7/storage"
)
var (
accessKey = os.Getenv("QINIU_ACCESS_KEY")
secretKey = os.Getenv("QINIU_SECRET_KEY")
bucket = os.Getenv("QINIU_TEST_BUCKET")
)
func main() {
mac := qbox.NewMac(accessKey, secretKey)
cfg := storage.Config{
// Whether to use HTTPS domains for resource management
UseHTTPS: false,
}
// Specify the region of the bucket; if not set, it is detected automatically
// In most cases there is no need to set it
//cfg.Zone=&storage.ZoneHuabei
bucketManager := storage.NewBucketManager(mac, &cfg)
key := "github.png"
err := bucketManager.Delete(bucket, key)
if err != nil {
fmt.Println(err)
return
}
}

View File

@@ -1,36 +0,0 @@
package main
import (
"fmt"
"os"
"github.com/qiniu/api.v7/auth/qbox"
"github.com/qiniu/api.v7/storage"
)
var (
accessKey = os.Getenv("QINIU_ACCESS_KEY")
secretKey = os.Getenv("QINIU_SECRET_KEY")
bucket = os.Getenv("QINIU_TEST_BUCKET")
)
func main() {
mac := qbox.NewMac(accessKey, secretKey)
cfg := storage.Config{
// Whether to use HTTPS domains for resource management
UseHTTPS: false,
}
// Specify the region of the bucket; if not set, it is detected automatically
// In most cases there is no need to set it
//cfg.Zone=&storage.ZoneHuabei
bucketManager := storage.NewBucketManager(mac, &cfg)
key := "github.png"
days := 7
err := bucketManager.DeleteAfterDays(bucket, key, days)
if err != nil {
fmt.Println(err)
return
}
}

View File

@@ -1,33 +0,0 @@
package main
import (
"fmt"
"os"
"time"
"github.com/qiniu/api.v7/auth/qbox"
"github.com/qiniu/api.v7/storage"
)
var (
accessKey = os.Getenv("QINIU_ACCESS_KEY")
secretKey = os.Getenv("QINIU_SECRET_KEY")
bucket = os.Getenv("QINIU_TEST_BUCKET")
)
func main() {
mac := qbox.NewMac(accessKey, secretKey)
// Public-bucket access
domain := "https://image.example.com"
key := "这是一个测试文件.jpg"
publicAccessURL := storage.MakePublicURL(domain, key)
fmt.Println(publicAccessURL)
// Private-bucket access
domain = "https://image.example.com"
key = "这是一个测试文件.jpg"
deadline := time.Now().Add(time.Second * 3600).Unix() // valid for 1 hour
privateAccessURL := storage.MakePrivateURL(mac, domain, key, deadline)
fmt.Println(privateAccessURL)
}

View File

@@ -1,44 +0,0 @@
package main
import (
"fmt"
"os"
"github.com/qiniu/api.v7/auth/qbox"
"github.com/qiniu/api.v7/storage"
)
var (
accessKey = os.Getenv("QINIU_ACCESS_KEY")
secretKey = os.Getenv("QINIU_SECRET_KEY")
bucket = os.Getenv("QINIU_TEST_BUCKET")
)
func main() {
mac := qbox.NewMac(accessKey, secretKey)
cfg := storage.Config{
// Whether to use HTTPS domains for resource management
UseHTTPS: false,
}
// Specify the region of the bucket; if not set, it is detected automatically
// In most cases there is no need to set it
//cfg.Zone=&storage.ZoneHuabei
bucketManager := storage.NewBucketManager(mac, &cfg)
resURL := "http://devtools.qiniu.com/qiniu.png"
// Specify the key to save the fetched file as
fetchRet, err := bucketManager.Fetch(resURL, bucket, "qiniu.png")
if err != nil {
fmt.Println("fetch error,", err)
} else {
fmt.Println(fetchRet.String())
}
// Without a key, the file hash is used as the key by default
fetchRet, err = bucketManager.FetchWithoutKey(resURL, bucket)
if err != nil {
fmt.Println("fetch error,", err)
} else {
fmt.Println(fetchRet.String())
}
}

View File

@@ -1,51 +0,0 @@
package main
import (
"fmt"
"os"
"github.com/qiniu/api.v7/auth/qbox"
"github.com/qiniu/api.v7/storage"
)
var (
accessKey = os.Getenv("QINIU_ACCESS_KEY")
secretKey = os.Getenv("QINIU_SECRET_KEY")
bucket = os.Getenv("QINIU_TEST_BUCKET")
)
func main() {
mac := qbox.NewMac(accessKey, secretKey)
cfg := storage.Config{
// Whether to use HTTPS domains for resource management
UseHTTPS: false,
}
// Specify the region of the bucket; if not set, it is detected automatically
// In most cases there is no need to set it
//cfg.Zone=&storage.ZoneHuabei
bucketManager := storage.NewBucketManager(mac, &cfg)
limit := 1000
prefix := "qiniu"
delimiter := ""
// The initial list marker is empty
marker := ""
for {
entries, _, nextMarker, hasNext, err := bucketManager.ListFiles(bucket, prefix, delimiter, marker, limit)
if err != nil {
fmt.Println("list error,", err)
break
}
//print entries
for _, entry := range entries {
fmt.Println(entry.Key)
}
if hasNext {
marker = nextMarker
} else {
//list end
break
}
}
}

View File

@@ -1,42 +0,0 @@
package main
import (
"fmt"
"os"
"github.com/qiniu/api.v7/auth/qbox"
"github.com/qiniu/api.v7/storage"
)
var (
accessKey = os.Getenv("QINIU_ACCESS_KEY")
secretKey = os.Getenv("QINIU_SECRET_KEY")
bucket = os.Getenv("QINIU_TEST_BUCKET")
)
func main() {
mac := qbox.NewMac(accessKey, secretKey)
cfg := storage.Config{
// Whether to use HTTPS domains for resource management
UseHTTPS: false,
}
// Specify the region of the bucket; if not set, it is detected automatically
// In most cases there is no need to set it
//cfg.Zone=&storage.ZoneHuabei
bucketManager := storage.NewBucketManager(mac, &cfg)
srcBucket := bucket
srcKey := "github.png"
// The destination bucket may be the same as the source bucket, but must not be in a different region
destBucket := srcBucket
// The destination key may be the same as or different from the source key
destKey := "github-new.png"
// Whether to overwrite an existing destination file; if not, the server returns 614 file exists by default
force := false
err := bucketManager.Move(srcBucket, srcKey, destBucket, destKey, force)
if err != nil {
fmt.Println(err)
return
}
}

View File

@@ -1,34 +0,0 @@
package main
import (
"fmt"
"os"
"github.com/qiniu/api.v7/auth/qbox"
"github.com/qiniu/api.v7/storage"
)
var (
accessKey = os.Getenv("QINIU_ACCESS_KEY")
secretKey = os.Getenv("QINIU_SECRET_KEY")
bucket = os.Getenv("QINIU_TEST_BUCKET")
)
func main() {
mac := qbox.NewMac(accessKey, secretKey)
cfg := storage.Config{
// Whether to use HTTPS domains for resource management
UseHTTPS: false,
}
// Specify the region of the bucket; if not set, it is detected automatically
// In most cases there is no need to set it
//cfg.Zone=&storage.ZoneHuabei
bucketManager := storage.NewBucketManager(mac, &cfg)
key := "qiniu.png"
err := bucketManager.Prefetch(bucket, key)
if err != nil {
fmt.Println("fetch error,", err)
}
}

View File

@@ -1,38 +0,0 @@
package main
import (
"fmt"
"os"
"github.com/qiniu/api.v7/auth/qbox"
"github.com/qiniu/api.v7/storage"
)
var (
accessKey = os.Getenv("QINIU_ACCESS_KEY")
secretKey = os.Getenv("QINIU_SECRET_KEY")
bucket = os.Getenv("QINIU_TEST_BUCKET")
)
func main() {
mac := qbox.NewMac(accessKey, secretKey)
cfg := storage.Config{
// Whether to use HTTPS domains for resource management
UseHTTPS: false,
}
// Specify the region of the bucket; if not set, it is detected automatically
// In most cases there is no need to set it
//cfg.Zone=&storage.ZoneHuabei
bucketManager := storage.NewBucketManager(mac, &cfg)
key := "qiniu.png"
fileInfo, sErr := bucketManager.Stat(bucket, key)
if sErr != nil {
fmt.Println(sErr)
return
}
fmt.Println(fileInfo.String())
// PutTime can be parsed into a time.Time
fmt.Println(storage.ParsePutTime(fileInfo.PutTime))
}

View File

@@ -1,52 +0,0 @@
package main
import (
"encoding/base64"
"fmt"
"os"
"strings"
"github.com/qiniu/api.v7/auth/qbox"
"github.com/qiniu/api.v7/storage"
)
var (
accessKey = os.Getenv("QINIU_ACCESS_KEY")
secretKey = os.Getenv("QINIU_SECRET_KEY")
bucket = os.Getenv("QINIU_TEST_BUCKET")
// The private data-processing queue (pipeline); specify it to guarantee processing speed
pipeline = os.Getenv("QINIU_TEST_PIPELINE")
)
func main() {
mac := qbox.NewMac(accessKey, secretKey)
cfg := storage.Config{
UseHTTPS: true,
}
// Specify the region of the bucket; if not set, it is detected automatically
// In most cases there is no need to set it
//cfg.Zone=&storage.ZoneHuabei
operationManager := storage.NewOperationManager(mac, &cfg)
key := "qiniu.mp4"
saveBucket := bucket
// The set of data-processing commands (fops)
fopAvthumb := fmt.Sprintf("avthumb/mp4/s/480x320/vb/500k|saveas/%s",
storage.EncodedEntry(saveBucket, "pfop_test_qiniu.mp4"))
fopVframe := fmt.Sprintf("vframe/jpg/offset/10|saveas/%s",
storage.EncodedEntry(saveBucket, "pfop_test_qiniu.jpg"))
fopVsample := fmt.Sprintf("vsample/jpg/interval/20/pattern/%s",
base64.URLEncoding.EncodeToString([]byte("pfop_test_$(count).jpg")))
fopBatch := []string{fopAvthumb, fopVframe, fopVsample}
fops := strings.Join(fopBatch, ";")
// Force re-execution of the data-processing task
force := true
// URL to notify after all fops have completed
notifyURL := "http://api.example.com/pfop/callback"
// pipeline is the private data-processing queue; specify it to guarantee processing speed
persistentId, err := operationManager.Pfop(bucket, key, fops, pipeline, notifyURL, force)
if err != nil {
fmt.Println(err)
return
}
fmt.Println(persistentId)
}

View File

@@ -1,172 +0,0 @@
package storage
import (
"bytes"
"context"
"encoding/base64"
"fmt"
"hash/crc32"
"io"
"strconv"
"strings"
"github.com/qiniu/x/rpc.v7"
)
// Base64Uploader represents a Base64 uploader.
type Base64Uploader struct {
client *rpc.Client
cfg *Config
}
// NewBase64Uploader builds a new Base64 uploader.
func NewBase64Uploader(cfg *Config) *Base64Uploader {
if cfg == nil {
cfg = &Config{}
}
return &Base64Uploader{
client: &rpc.DefaultClient,
cfg: cfg,
}
}
// NewBase64UploaderEx builds a new Base64 uploader with a custom rpc client.
func NewBase64UploaderEx(cfg *Config, client *rpc.Client) *Base64Uploader {
if cfg == nil {
cfg = &Config{}
}
if client == nil {
client = &rpc.DefaultClient
}
return &Base64Uploader{
client: client,
cfg: cfg,
}
}
// Base64PutExtra holds optional settings for a Base64 upload.
type Base64PutExtra struct {
// Optional. User-defined params; keys must start with "x:", otherwise they are ignored.
Params map[string]string
// Optional. When empty, the server detects the MIME type automatically.
MimeType string
}
// Put uploads a file encoded as Base64.
//
// ctx is the request context.
// ret is the value unmarshalled from the response on success. If the uptoken does not set callbackUrl or returnBody, the structure is PutRet.
// uptoken is the upload token issued by the business server.
// key is the storage path of the file, e.g. "foo/bar.jpg". We recommend that key does not start with '/'; an empty key is also valid.
// base64Data is the Base64 data to upload, typically the Base64 encoding of image data.
// extra holds optional settings and may be nil; see Base64PutExtra for details.
//
func (p *Base64Uploader) Put(
ctx context.Context, ret interface{}, uptoken, key string, base64Data []byte, extra *Base64PutExtra) (err error) {
return p.put(ctx, ret, uptoken, key, true, base64Data, extra)
}
// PutWithoutKey uploads a Base64-encoded file and uses the content hash of the file as its key.
func (p *Base64Uploader) PutWithoutKey(
ctx context.Context, ret interface{}, uptoken string, base64Data []byte, extra *Base64PutExtra) (err error) {
return p.put(ctx, ret, uptoken, "", false, base64Data, extra)
}
func (p *Base64Uploader) put(
ctx context.Context, ret interface{}, uptoken, key string, hasKey bool, base64Data []byte, extra *Base64PutExtra) (err error) {
//get up host
ak, bucket, gErr := getAkBucketFromUploadToken(uptoken)
if gErr != nil {
err = gErr
return
}
var upHost string
upHost, err = p.upHost(ak, bucket)
if err != nil {
return
}
//set default extra
if extra == nil {
extra = &Base64PutExtra{}
}
//calc crc32
h := crc32.NewIEEE()
rawReader := base64.NewDecoder(base64.StdEncoding, bytes.NewReader(base64Data))
fsize, decodeErr := io.Copy(h, rawReader)
if decodeErr != nil {
err = fmt.Errorf("invalid base64 data, %s", decodeErr.Error())
return
}
fCrc32 := h.Sum32()
postPath := bytes.NewBufferString("/putb64")
//add fsize
postPath.WriteString("/")
postPath.WriteString(strconv.Itoa(int(fsize)))
//add key
if hasKey {
postPath.WriteString("/key/")
postPath.WriteString(base64.URLEncoding.EncodeToString([]byte(key)))
}
//add mimeType
if extra.MimeType != "" {
postPath.WriteString("/mimeType/")
postPath.WriteString(base64.URLEncoding.EncodeToString([]byte(extra.MimeType)))
}
//add crc32
postPath.WriteString("/crc32/")
postPath.WriteString(fmt.Sprintf("%d", fCrc32))
//add extra params
if len(extra.Params) > 0 {
for k, v := range extra.Params {
if strings.HasPrefix(k, "x:") && v != "" {
postPath.WriteString("/")
postPath.WriteString(k)
postPath.WriteString("/")
postPath.WriteString(base64.URLEncoding.EncodeToString([]byte(v)))
}
}
}
postURL := fmt.Sprintf("%s%s", upHost, postPath.String())
postClient := newUptokenClient(p.client, uptoken)
return postClient.CallWith(ctx, ret, "POST", postURL, "application/octet-stream",
bytes.NewReader(base64Data), len(base64Data))
}
func (p *Base64Uploader) upHost(ak, bucket string) (upHost string, err error) {
var zone *Zone
if p.cfg.Zone != nil {
zone = p.cfg.Zone
} else {
if v, zoneErr := GetZone(ak, bucket); zoneErr != nil {
err = zoneErr
return
} else {
zone = v
}
}
scheme := "http://"
if p.cfg.UseHTTPS {
scheme = "https://"
}
host := zone.SrcUpHosts[0]
if p.cfg.UseCdnDomains {
host = zone.CdnUpHosts[0]
}
upHost = fmt.Sprintf("%s%s", scheme, host)
return
}
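A hedged usage sketch of the Base64Uploader above: the upload token is built the same way as in the examples earlier in this commit, and the raw bytes and key are placeholders rather than a real image.

```go
package main

import (
	"context"
	"encoding/base64"
	"fmt"
	"os"

	"github.com/qiniu/api.v7/auth/qbox"
	"github.com/qiniu/api.v7/storage"
)

func main() {
	mac := qbox.NewMac(os.Getenv("QINIU_ACCESS_KEY"), os.Getenv("QINIU_SECRET_KEY"))
	putPolicy := storage.PutPolicy{Scope: os.Getenv("QINIU_TEST_BUCKET")}
	upToken := putPolicy.UploadToken(mac)

	// The uploader expects Base64-encoded bytes (StdEncoding, as decoded in put above)
	raw := []byte("placeholder image bytes")
	base64Data := []byte(base64.StdEncoding.EncodeToString(raw))

	uploader := storage.NewBase64Uploader(&storage.Config{})
	ret := storage.PutRet{}
	extra := storage.Base64PutExtra{MimeType: "image/png"}
	err := uploader.Put(context.Background(), &ret, upToken, "base64-demo.png", base64Data, &extra)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(ret.Key, ret.Hash)
}
```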

File diff suppressed because one or more lines are too long

View File

@@ -1,553 +0,0 @@
package storage
import (
"context"
"encoding/base64"
"errors"
"fmt"
"net/url"
"strconv"
"strings"
"github.com/qiniu/api.v7/auth/qbox"
"github.com/qiniu/x/rpc.v7"
)
// Default hosts for resource management.
const (
DefaultRsHost = "rs.qiniu.com"
DefaultRsfHost = "rsf.qiniu.com"
DefaultAPIHost = "api.qiniu.com"
DefaultPubHost = "pu.qbox.me:10200"
)
// FileInfo holds the basic information of a file.
type FileInfo struct {
Hash string `json:"hash"`
Fsize int64 `json:"fsize"`
PutTime int64 `json:"putTime"`
MimeType string `json:"mimeType"`
Type int `json:"type"`
}
func (f *FileInfo) String() string {
str := ""
str += fmt.Sprintf("Hash: %s\n", f.Hash)
str += fmt.Sprintf("Fsize: %d\n", f.Fsize)
str += fmt.Sprintf("PutTime: %d\n", f.PutTime)
str += fmt.Sprintf("MimeType: %s\n", f.MimeType)
str += fmt.Sprintf("Type: %d\n", f.Type)
return str
}
// FetchRet is the return value of a fetch operation.
type FetchRet struct {
Hash string `json:"hash"`
Fsize int64 `json:"fsize"`
MimeType string `json:"mimeType"`
Key string `json:"key"`
}
func (r *FetchRet) String() string {
str := ""
str += fmt.Sprintf("Key: %s\n", r.Key)
str += fmt.Sprintf("Hash: %s\n", r.Hash)
str += fmt.Sprintf("Fsize: %d\n", r.Fsize)
str += fmt.Sprintf("MimeType: %s\n", r.MimeType)
return str
}
// ListItem is one entry returned by file listing.
type ListItem struct {
Key string `json:"key"`
Hash string `json:"hash"`
Fsize int64 `json:"fsize"`
PutTime int64 `json:"putTime"`
MimeType string `json:"mimeType"`
Type int `json:"type"`
EndUser string `json:"endUser"`
}
func (l *ListItem) String() string {
str := ""
str += fmt.Sprintf("Hash: %s\n", l.Hash)
str += fmt.Sprintf("Fsize: %d\n", l.Fsize)
str += fmt.Sprintf("PutTime: %d\n", l.PutTime)
str += fmt.Sprintf("MimeType: %s\n", l.MimeType)
str += fmt.Sprintf("Type: %d\n", l.Type)
str += fmt.Sprintf("EndUser: %s\n", l.EndUser)
return str
}
// BatchOpRet is the per-operation reply of a batch request.
// Batch supports the stat, copy, delete, move, chgm, chtype and deleteAfterDays operations.
// For stat, the basic file information is returned when the file exists; otherwise an error is returned.
// For the other operations a code is returned on success; on failure an error message is returned as well and can be used to locate the problem.
type BatchOpRet struct {
Code int `json:"code,omitempty"`
Data struct {
Hash string `json:"hash"`
Fsize int64 `json:"fsize"`
PutTime int64 `json:"putTime"`
MimeType string `json:"mimeType"`
Type int `json:"type"`
Error string `json:"error"`
} `json:"data,omitempty"`
}
// BucketManager provides operations for managing resources in buckets
type BucketManager struct {
client *rpc.Client
mac *qbox.Mac
cfg *Config
}
// NewBucketManager creates a new bucket manager
func NewBucketManager(mac *qbox.Mac, cfg *Config) *BucketManager {
if cfg == nil {
cfg = &Config{}
}
return &BucketManager{
client: NewClient(mac, nil),
mac: mac,
cfg: cfg,
}
}
// NewBucketManagerEx creates a new bucket manager with a caller-supplied rpc.Client
func NewBucketManagerEx(mac *qbox.Mac, cfg *Config, client *rpc.Client) *BucketManager {
if cfg == nil {
cfg = &Config{}
}
if client == nil {
client = NewClient(mac, nil)
}
return &BucketManager{
client: client,
mac: mac,
cfg: cfg,
}
}
// Buckets returns the list of buckets; when shared is true, buckets shared with this account are listed as well
func (m *BucketManager) Buckets(shared bool) (buckets []string, err error) {
ctx := context.TODO()
var reqHost string
scheme := "http://"
if m.cfg.UseHTTPS {
scheme = "https://"
}
reqHost = fmt.Sprintf("%s%s", scheme, DefaultRsHost)
reqURL := fmt.Sprintf("%s/buckets?shared=%v", reqHost, shared)
err = m.client.Call(ctx, &buckets, "POST", reqURL)
return
}
// Stat returns the basic information of a file
func (m *BucketManager) Stat(bucket, key string) (info FileInfo, err error) {
ctx := context.TODO()
reqHost, reqErr := m.rsHost(bucket)
if reqErr != nil {
err = reqErr
return
}
reqURL := fmt.Sprintf("%s%s", reqHost, URIStat(bucket, key))
err = m.client.Call(ctx, &info, "POST", reqURL)
return
}
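A minimal, self-contained sketch of constructing a BucketManager and calling Stat; the access key, secret key and bucket are placeholders.
```
package main

import (
	"fmt"

	"github.com/qiniu/api.v7/auth/qbox"
	"github.com/qiniu/api.v7/storage"
)

func main() {
	mac := qbox.NewMac("your-access-key", "your-secret-key")
	// Zone is left nil, so it is looked up per bucket via GetZone.
	cfg := storage.Config{UseHTTPS: true}
	bucketManager := storage.NewBucketManager(mac, &cfg)

	info, err := bucketManager.Stat("your-bucket", "qiniu.png")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Print(info.String())
}
```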
// Delete removes a file from a bucket
func (m *BucketManager) Delete(bucket, key string) (err error) {
ctx := context.TODO()
reqHost, reqErr := m.rsHost(bucket)
if reqErr != nil {
err = reqErr
return
}
reqURL := fmt.Sprintf("%s%s", reqHost, URIDelete(bucket, key))
err = m.client.Call(ctx, nil, "POST", reqURL)
return
}
// Copy creates a new copy of an existing file
func (m *BucketManager) Copy(srcBucket, srcKey, destBucket, destKey string, force bool) (err error) {
ctx := context.TODO()
reqHost, reqErr := m.rsHost(srcBucket)
if reqErr != nil {
err = reqErr
return
}
reqURL := fmt.Sprintf("%s%s", reqHost, URICopy(srcBucket, srcKey, destBucket, destKey, force))
err = m.client.Call(ctx, nil, "POST", reqURL)
return
}
// Move moves a file to another bucket or renames it
func (m *BucketManager) Move(srcBucket, srcKey, destBucket, destKey string, force bool) (err error) {
ctx := context.TODO()
reqHost, reqErr := m.rsHost(srcBucket)
if reqErr != nil {
err = reqErr
return
}
reqURL := fmt.Sprintf("%s%s", reqHost, URIMove(srcBucket, srcKey, destBucket, destKey, force))
err = m.client.Call(ctx, nil, "POST", reqURL)
return
}
// ChangeMime updates the MIME type of a file
func (m *BucketManager) ChangeMime(bucket, key, newMime string) (err error) {
ctx := context.TODO()
reqHost, reqErr := m.rsHost(bucket)
if reqErr != nil {
err = reqErr
return
}
reqURL := fmt.Sprintf("%s%s", reqHost, URIChangeMime(bucket, key, newMime))
err = m.client.Call(ctx, nil, "POST", reqURL)
return
}
// ChangeType updates the storage type of a file: 0 for standard storage, 1 for infrequent-access storage
func (m *BucketManager) ChangeType(bucket, key string, fileType int) (err error) {
ctx := context.TODO()
reqHost, reqErr := m.rsHost(bucket)
if reqErr != nil {
err = reqErr
return
}
reqURL := fmt.Sprintf("%s%s", reqHost, URIChangeType(bucket, key, fileType))
err = m.client.Call(ctx, nil, "POST", reqURL)
return
}
// DeleteAfterDays updates the lifecycle of a file; days set to 0 cancels the scheduled deletion and keeps the file permanently
func (m *BucketManager) DeleteAfterDays(bucket, key string, days int) (err error) {
ctx := context.TODO()
reqHost, reqErr := m.rsHost(bucket)
if reqErr != nil {
err = reqErr
return
}
reqURL := fmt.Sprintf("%s%s", reqHost, URIDeleteAfterDays(bucket, key, days))
err = m.client.Call(ctx, nil, "POST", reqURL)
return
}
// Batch performs resource management operations in bulk; it supports the stat, copy, move, delete, chgm, chtype and deleteAfterDays operations
func (m *BucketManager) Batch(operations []string) (batchOpRet []BatchOpRet, err error) {
if len(operations) > 1000 {
err = errors.New("batch operation count exceeds the limit of 1000")
return
}
ctx := context.TODO()
scheme := "http://"
if m.cfg.UseHTTPS {
scheme = "https://"
}
reqURL := fmt.Sprintf("%s%s/batch", scheme, DefaultRsHost)
params := map[string][]string{
"op": operations,
}
err = m.client.CallWithForm(ctx, &batchOpRet, "POST", reqURL, params)
return
}
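A sketch of batching stat operations, continuing from the bucketManager constructed in the previous sketch; the per-item result is checked against 200, which is assumed here to be the success code of a batch item.
```
keys := []string{"a.png", "b.png", "c.png"}
ops := make([]string, 0, len(keys))
for _, key := range keys {
	ops = append(ops, storage.URIStat("your-bucket", key))
}
rets, err := bucketManager.Batch(ops)
if err != nil {
	// overall failure, e.g. more than 1000 operations or a transport error
	fmt.Println(err)
	return
}
for i, ret := range rets {
	if ret.Code == 200 {
		fmt.Println(keys[i], ret.Data.Fsize, ret.Data.Hash)
	} else {
		fmt.Println(keys[i], ret.Code, ret.Data.Error)
	}
}
```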
// Fetch fetches a remote resource into a bucket and saves it under the given key
func (m *BucketManager) Fetch(resURL, bucket, key string) (fetchRet FetchRet, err error) {
ctx := context.TODO()
reqHost, reqErr := m.iovipHost(bucket)
if reqErr != nil {
err = reqErr
return
}
reqURL := fmt.Sprintf("%s%s", reqHost, uriFetch(resURL, bucket, key))
err = m.client.Call(ctx, &fetchRet, "POST", reqURL)
return
}
// FetchWithoutKey fetches a remote resource into a bucket and names it by the hash of its content
func (m *BucketManager) FetchWithoutKey(resURL, bucket string) (fetchRet FetchRet, err error) {
ctx := context.TODO()
reqHost, reqErr := m.iovipHost(bucket)
if reqErr != nil {
err = reqErr
return
}
reqURL := fmt.Sprintf("%s%s", reqHost, uriFetchWithoutKey(resURL, bucket))
err = m.client.Call(ctx, &fetchRet, "POST", reqURL)
return
}
// Prefetch synchronizes a file in a mirrored bucket with its origin
func (m *BucketManager) Prefetch(bucket, key string) (err error) {
ctx := context.TODO()
reqHost, reqErr := m.iovipHost(bucket)
if reqErr != nil {
err = reqErr
return
}
reqURL := fmt.Sprintf("%s%s", reqHost, uriPrefetch(bucket, key))
err = m.client.Call(ctx, nil, "POST", reqURL)
return
}
// SetImage sets the image (mirror) source of a bucket
func (m *BucketManager) SetImage(siteURL, bucket string) (err error) {
ctx := context.TODO()
reqURL := fmt.Sprintf("http://%s%s", DefaultPubHost, uriSetImage(siteURL, bucket))
err = m.client.Call(ctx, nil, "POST", reqURL)
return
}
// SetImageWithHost sets the image (mirror) source of a bucket and adds a back-to-source Host header
func (m *BucketManager) SetImageWithHost(siteURL, bucket, host string) (err error) {
ctx := context.TODO()
reqURL := fmt.Sprintf("http://%s%s", DefaultPubHost,
uriSetImageWithHost(siteURL, bucket, host))
err = m.client.Call(ctx, nil, "POST", reqURL)
return
}
// UnsetImage removes the image (mirror) source setting of a bucket
func (m *BucketManager) UnsetImage(bucket string) (err error) {
ctx := context.TODO()
reqURL := fmt.Sprintf("http://%s%s", DefaultPubHost, uriUnsetImage(bucket))
err = m.client.Call(ctx, nil, "POST", reqURL)
return err
}
type listFilesRet struct {
Marker string `json:"marker"`
Items []ListItem `json:"items"`
CommonPrefixes []string `json:"commonPrefixes"`
}
// ListFiles lists the files in a bucket. It accepts a key prefix, a directory delimiter, the marker returned by the
// previous call, and the maximum number of entries per call (limit, at most 1000).
func (m *BucketManager) ListFiles(bucket, prefix, delimiter, marker string,
limit int) (entries []ListItem, commonPrefixes []string, nextMarker string, hasNext bool, err error) {
if limit <= 0 || limit > 1000 {
err = errors.New("invalid list limit, only allow [1, 1000]")
return
}
ctx := context.TODO()
reqHost, reqErr := m.rsfHost(bucket)
if reqErr != nil {
err = reqErr
return
}
ret := listFilesRet{}
reqURL := fmt.Sprintf("%s%s", reqHost, uriListFiles(bucket, prefix, delimiter, marker, limit))
err = m.client.Call(ctx, &ret, "POST", reqURL)
if err != nil {
return
}
commonPrefixes = ret.CommonPrefixes
nextMarker = ret.Marker
entries = ret.Items
if ret.Marker != "" {
hasNext = true
}
return
}
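A sketch of paging through a bucket with the returned marker, continuing from the bucketManager above; the prefix and bucket name are placeholders.
```
limit := 1000
prefix := "photos/"
marker := ""
for {
	entries, _, nextMarker, hasNext, err := bucketManager.ListFiles("your-bucket", prefix, "", marker, limit)
	if err != nil {
		fmt.Println(err)
		break
	}
	for _, entry := range entries {
		fmt.Println(entry.Key, entry.Fsize)
	}
	if !hasNext {
		break
	}
	marker = nextMarker
}
```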
func (m *BucketManager) rsHost(bucket string) (rsHost string, err error) {
var zone *Zone
if m.cfg.Zone != nil {
zone = m.cfg.Zone
} else {
if v, zoneErr := GetZone(m.mac.AccessKey, bucket); zoneErr != nil {
err = zoneErr
return
} else {
zone = v
}
}
scheme := "http://"
if m.cfg.UseHTTPS {
scheme = "https://"
}
rsHost = fmt.Sprintf("%s%s", scheme, zone.RsHost)
return
}
func (m *BucketManager) rsfHost(bucket string) (rsfHost string, err error) {
var zone *Zone
if m.cfg.Zone != nil {
zone = m.cfg.Zone
} else {
if v, zoneErr := GetZone(m.mac.AccessKey, bucket); zoneErr != nil {
err = zoneErr
return
} else {
zone = v
}
}
scheme := "http://"
if m.cfg.UseHTTPS {
scheme = "https://"
}
rsfHost = fmt.Sprintf("%s%s", scheme, zone.RsfHost)
return
}
func (m *BucketManager) iovipHost(bucket string) (iovipHost string, err error) {
var zone *Zone
if m.cfg.Zone != nil {
zone = m.cfg.Zone
} else {
if v, zoneErr := GetZone(m.mac.AccessKey, bucket); zoneErr != nil {
err = zoneErr
return
} else {
zone = v
}
}
scheme := "http://"
if m.cfg.UseHTTPS {
scheme = "https://"
}
iovipHost = fmt.Sprintf("%s%s", scheme, zone.IovipHost)
return
}
// The exported helpers below build op commands that can be used inside Batch operations.
// URIStat builds the request command of the stat API
func URIStat(bucket, key string) string {
return fmt.Sprintf("/stat/%s", EncodedEntry(bucket, key))
}
// URIDelete builds the request command of the delete API
func URIDelete(bucket, key string) string {
return fmt.Sprintf("/delete/%s", EncodedEntry(bucket, key))
}
// URICopy builds the request command of the copy API
func URICopy(srcBucket, srcKey, destBucket, destKey string, force bool) string {
return fmt.Sprintf("/copy/%s/%s/force/%v", EncodedEntry(srcBucket, srcKey),
EncodedEntry(destBucket, destKey), force)
}
// URIMove builds the request command of the move API
func URIMove(srcBucket, srcKey, destBucket, destKey string, force bool) string {
return fmt.Sprintf("/move/%s/%s/force/%v", EncodedEntry(srcBucket, srcKey),
EncodedEntry(destBucket, destKey), force)
}
// URIDeleteAfterDays builds the request command of the deleteAfterDays API
func URIDeleteAfterDays(bucket, key string, days int) string {
return fmt.Sprintf("/deleteAfterDays/%s/%d", EncodedEntry(bucket, key), days)
}
// URIChangeMime builds the request command of the chgm API
func URIChangeMime(bucket, key, newMime string) string {
return fmt.Sprintf("/chgm/%s/mime/%s", EncodedEntry(bucket, key),
base64.URLEncoding.EncodeToString([]byte(newMime)))
}
// URIChangeType builds the request command of the chtype API
func URIChangeType(bucket, key string, fileType int) string {
return fmt.Sprintf("/chtype/%s/type/%d", EncodedEntry(bucket, key), fileType)
}
// The unexported helpers below build op commands that cannot be used inside Batch operations.
func uriFetch(resURL, bucket, key string) string {
return fmt.Sprintf("/fetch/%s/to/%s",
base64.URLEncoding.EncodeToString([]byte(resURL)), EncodedEntry(bucket, key))
}
func uriFetchWithoutKey(resURL, bucket string) string {
return fmt.Sprintf("/fetch/%s/to/%s",
base64.URLEncoding.EncodeToString([]byte(resURL)), EncodedEntryWithoutKey(bucket))
}
func uriPrefetch(bucket, key string) string {
return fmt.Sprintf("/prefetch/%s", EncodedEntry(bucket, key))
}
func uriSetImage(siteURL, bucket string) string {
return fmt.Sprintf("/image/%s/from/%s", bucket,
base64.URLEncoding.EncodeToString([]byte(siteURL)))
}
func uriSetImageWithHost(siteURL, bucket, host string) string {
return fmt.Sprintf("/image/%s/from/%s/host/%s", bucket,
base64.URLEncoding.EncodeToString([]byte(siteURL)),
base64.URLEncoding.EncodeToString([]byte(host)))
}
func uriUnsetImage(bucket string) string {
return fmt.Sprintf("/unimage/%s", bucket)
}
func uriListFiles(bucket, prefix, delimiter, marker string, limit int) string {
query := make(url.Values)
query.Add("bucket", bucket)
if prefix != "" {
query.Add("prefix", prefix)
}
if delimiter != "" {
query.Add("delimiter", delimiter)
}
if marker != "" {
query.Add("marker", marker)
}
if limit > 0 {
query.Add("limit", strconv.FormatInt(int64(limit), 10))
}
return fmt.Sprintf("/list?%s", query.Encode())
}
// EncodedEntry generates the URL-safe base64 encoded entry for a bucket and key
func EncodedEntry(bucket, key string) string {
entry := fmt.Sprintf("%s:%s", bucket, key)
return base64.URLEncoding.EncodeToString([]byte(entry))
}
// EncodedEntryWithoutKey generates the URL-safe base64 encoded entry when the key is null
func EncodedEntryWithoutKey(bucket string) string {
return base64.URLEncoding.EncodeToString([]byte(bucket))
}
// MakePublicURL builds the download URL of a resource in a public bucket
func MakePublicURL(domain, key string) (finalUrl string) {
srcUrl := fmt.Sprintf("%s/%s", domain, key)
srcUri, _ := url.Parse(srcUrl)
finalUrl = srcUri.String()
return
}
// MakePrivateURL builds the download URL of a resource in a private bucket
func MakePrivateURL(mac *qbox.Mac, domain, key string, deadline int64) (privateURL string) {
publicURL := MakePublicURL(domain, key)
urlToSign := publicURL
if strings.Contains(publicURL, "?") {
urlToSign = fmt.Sprintf("%s&e=%d", urlToSign, deadline)
} else {
urlToSign = fmt.Sprintf("%s?e=%d", urlToSign, deadline)
}
token := mac.Sign([]byte(urlToSign))
privateURL = fmt.Sprintf("%s&token=%s", urlToSign, token)
return
}
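A sketch of building a signed download link with MakePrivateURL; the domain and key are placeholders, and time must be imported alongside the packages used above.
```
mac := qbox.NewMac("your-access-key", "your-secret-key")
domain := "http://your-private-domain.example.com"
key := "photos/qiniu.png"
deadline := time.Now().Add(time.Hour).Unix() // the link stays valid for one hour
fmt.Println(storage.MakePrivateURL(mac, domain, key, deadline))
```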

View File

@@ -1,264 +0,0 @@
package storage
import (
"fmt"
"math/rand"
"net/http"
"os"
"testing"
"time"
"github.com/qiniu/api.v7/auth/qbox"
)
var (
testAK = os.Getenv("QINIU_ACCESS_KEY")
testSK = os.Getenv("QINIU_SECRET_KEY")
testBucket = os.Getenv("QINIU_TEST_BUCKET")
testBucketPrivate = os.Getenv("QINIU_TEST_BUCKET_PRIVATE")
testBucketPrivateDomain = os.Getenv("QINIU_TEST_DOMAIN_PRIVATE")
testPipeline = os.Getenv("QINIU_TEST_PIPELINE")
testKey = "qiniu.png"
testFetchUrl = "http://devtools.qiniu.com/qiniu.png"
testSiteUrl = "http://devtools.qiniu.com"
)
var mac *qbox.Mac
var bucketManager *BucketManager
var operationManager *OperationManager
var formUploader *FormUploader
var resumeUploader *ResumeUploader
var base64Uploader *Base64Uploader
func init() {
if testAK == "" || testSK == "" {
panic("please run ./test-env.sh first")
}
mac = qbox.NewMac(testAK, testSK)
cfg := Config{}
cfg.Zone = &Zone_z0
cfg.UseCdnDomains = true
bucketManager = NewBucketManager(mac, &cfg)
operationManager = NewOperationManager(mac, &cfg)
formUploader = NewFormUploader(&cfg)
resumeUploader = NewResumeUploader(&cfg)
base64Uploader = NewBase64Uploader(&cfg)
rand.Seed(time.Now().Unix())
}
//Test get zone
func TestGetZone(t *testing.T) {
zone, err := GetZone(testAK, testBucket)
if err != nil {
t.Fatalf("GetZone() error, %s", err)
}
t.Log(zone.String())
}
//Test get bucket list
func TestBuckets(t *testing.T) {
shared := true
buckets, err := bucketManager.Buckets(shared)
if err != nil {
t.Fatalf("Buckets() error, %s", err)
}
for _, bucket := range buckets {
t.Log(bucket)
}
}
//Test get file info
func TestStat(t *testing.T) {
keysToStat := []string{"qiniu.png"}
for _, eachKey := range keysToStat {
info, err := bucketManager.Stat(testBucket, eachKey)
if err != nil {
t.Logf("Stat() error, %s", err)
t.Fail()
} else {
t.Logf("FileInfo:\n %s", info.String())
}
}
}
func TestCopyMoveDelete(t *testing.T) {
keysCopyTarget := []string{"qiniu_1.png", "qiniu_2.png", "qiniu_3.png"}
keysToDelete := make([]string, 0, len(keysCopyTarget))
for _, eachKey := range keysCopyTarget {
err := bucketManager.Copy(testBucket, testKey, testBucket, eachKey, true)
if err != nil {
t.Logf("Copy() error, %s", err)
t.Fail()
}
}
for _, eachKey := range keysCopyTarget {
keyToDelete := eachKey + "_move"
err := bucketManager.Move(testBucket, eachKey, testBucket, keyToDelete, true)
if err != nil {
t.Logf("Move() error, %s", err)
t.Fail()
} else {
keysToDelete = append(keysToDelete, keyToDelete)
}
}
for _, eachKey := range keysToDelete {
err := bucketManager.Delete(testBucket, eachKey)
if err != nil {
t.Logf("Delete() error, %s", err)
t.Fail()
}
}
}
func TestFetch(t *testing.T) {
ret, err := bucketManager.Fetch(testFetchUrl, testBucket, "qiniu-fetch.png")
if err != nil {
t.Logf("Fetch() error, %s", err)
t.Fail()
} else {
t.Logf("FetchRet:\n %s", ret.String())
}
}
func TestFetchWithoutKey(t *testing.T) {
ret, err := bucketManager.FetchWithoutKey(testFetchUrl, testBucket)
if err != nil {
t.Logf("FetchWithoutKey() error, %s", err)
t.Fail()
} else {
t.Logf("FetchRet:\n %s", ret.String())
}
}
func TestDeleteAfterDays(t *testing.T) {
deleteKey := testKey + "_deleteAfterDays"
days := 7
bucketManager.Copy(testBucket, testKey, testBucket, deleteKey, true)
err := bucketManager.DeleteAfterDays(testBucket, deleteKey, days)
if err != nil {
t.Logf("DeleteAfterDays() error, %s", err)
t.Fail()
}
}
func TestChangeMime(t *testing.T) {
toChangeKey := testKey + "_changeMime"
bucketManager.Copy(testBucket, testKey, testBucket, toChangeKey, true)
newMime := "text/plain"
err := bucketManager.ChangeMime(testBucket, toChangeKey, newMime)
if err != nil {
t.Fatalf("ChangeMime() error, %s", err)
}
info, err := bucketManager.Stat(testBucket, toChangeKey)
if err != nil || info.MimeType != newMime {
t.Fatalf("ChangeMime() failed, %s", err)
}
bucketManager.Delete(testBucket, toChangeKey)
}
func TestChangeType(t *testing.T) {
toChangeKey := fmt.Sprintf("%s_changeType_%d", testKey, rand.Int())
bucketManager.Copy(testBucket, testKey, testBucket, toChangeKey, true)
fileType := 1
err := bucketManager.ChangeType(testBucket, toChangeKey, fileType)
if err != nil {
t.Fatalf("ChangeType() error, %s", err)
}
info, err := bucketManager.Stat(testBucket, toChangeKey)
if err != nil || info.Type != fileType {
t.Fatalf("ChangeMime() failed, %s", err)
}
bucketManager.Delete(testBucket, toChangeKey)
}
func TestPrefetchAndImage(t *testing.T) {
err := bucketManager.SetImage(testSiteUrl, testBucket)
if err != nil {
t.Fatalf("SetImage() error, %s", err)
}
err = bucketManager.Prefetch(testBucket, testKey)
if err != nil {
t.Fatalf("Prefetch() error, %s", err)
}
err = bucketManager.UnsetImage(testBucket)
if err != nil {
t.Fatalf("UnsetImage() error, %s", err)
}
}
func TestListFiles(t *testing.T) {
limit := 100
prefix := "listfiles/"
for i := 0; i < limit; i++ {
newKey := fmt.Sprintf("%s%s/%d", prefix, testKey, i)
bucketManager.Copy(testBucket, testKey, testBucket, newKey, true)
}
entries, _, _, hasNext, err := bucketManager.ListFiles(testBucket, prefix, "", "", limit)
if err != nil {
t.Fatalf("ListFiles() error, %s", err)
}
if hasNext {
t.Fatalf("ListFiles() failed, unexpected hasNext")
}
if len(entries) != limit {
t.Fatalf("ListFiles() failed, unexpected items count, expected: %d, actual: %d", limit, len(entries))
}
for _, entry := range entries {
t.Logf("ListItem:\n%s", entry.String())
}
}
func TestMakePrivateUrl(t *testing.T) {
deadline := time.Now().Add(time.Second * 3600).Unix()
privateURL := MakePrivateURL(mac, "http://"+testBucketPrivateDomain, testKey, deadline)
t.Logf("PrivateUrl: %s", privateURL)
resp, respErr := http.Get(privateURL)
if respErr != nil {
t.Fatalf("MakePrivateUrl() error, %s", respErr)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
t.Fatalf("MakePrivateUrl() error, %s", resp.Status)
}
}
func TestBatch(t *testing.T) {
copyCnt := 100
copyOps := make([]string, 0, copyCnt)
testKeys := make([]string, 0, copyCnt)
for i := 0; i < copyCnt; i++ {
cpKey := fmt.Sprintf("%s_batchcopy_%d", testKey, i)
testKeys = append(testKeys, cpKey)
copyOps = append(copyOps, URICopy(testBucket, testKey, testBucket, cpKey, true))
}
_, bErr := bucketManager.Batch(copyOps)
if bErr != nil {
t.Fatalf("BatchCopy error, %s", bErr)
}
statOps := make([]string, 0, copyCnt)
for _, k := range testKeys {
statOps = append(statOps, URIStat(testBucket, k))
}
batchOpRets, bErr := bucketManager.Batch(statOps)
if bErr != nil {
t.Fatalf("BatchStat error, %s", bErr)
}
t.Logf("BatchStat: %v", batchOpRets)
}

View File

@@ -1,38 +0,0 @@
package storage
import (
"github.com/qiniu/api.v7/auth/qbox"
"github.com/qiniu/x/rpc.v7"
"net/http"
)
type Transport struct {
mac qbox.Mac
Transport http.RoundTripper
}
func (t *Transport) NestedObject() interface{} {
return t.Transport
}
func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
token, err := t.mac.SignRequest(req)
if err != nil {
return
}
req.Header.Set("Authorization", "QBox "+token)
return t.Transport.RoundTrip(req)
}
func NewTransport(mac *qbox.Mac, transport http.RoundTripper) *Transport {
if transport == nil {
transport = http.DefaultTransport
}
t := &Transport{mac: *mac, Transport: transport}
return t
}
func NewClient(mac *qbox.Mac, transport http.RoundTripper) *rpc.Client {
t := NewTransport(mac, transport)
return &rpc.Client{&http.Client{Transport: t}}
}

View File

@@ -1,8 +0,0 @@
package storage
// Config holds the configuration for uploads, resource management and so on
type Config struct {
Zone *Zone //region (zone) of the bucket
UseHTTPS bool //whether to use HTTPS endpoints
UseCdnDomains bool //whether to use CDN-accelerated upload domains
}

View File

@@ -1,10 +0,0 @@
// Package storage provides upload, management and data-processing features for resources. Uploads can be performed
// either as form uploads or as chunked uploads, and the chunked flavor also supports resuming an interrupted upload.
//
// BucketManager covers resource management such as fetching file information, copying, deleting and renaming files,
// plus advanced features like changing a file's MIME type, lifecycle and storage type.
//
// FormUploader and ResumeUploader implement form upload and chunked/resumable upload respectively. For larger files
// (say, above 100MB) the chunked upload is generally recommended for better efficiency and reliability.
//
// OperationManager handles data processing: it issues persistent processing requests and queries their status.
package storage

View File

@@ -1,345 +0,0 @@
package storage
import (
"bytes"
"context"
"fmt"
"hash"
"hash/crc32"
"io"
"mime/multipart"
"net/textproto"
"os"
"path"
"path/filepath"
"strings"
"github.com/qiniu/x/rpc.v7"
)
// PutExtra holds the optional parameters of a form upload
type PutExtra struct {
// Optional. User-defined parameters; keys must start with "x:", otherwise they are ignored.
Params map[string]string
// Optional. When empty, the server detects the MIME type automatically.
MimeType string
// Upload event: progress notification. The callback should return as quickly as possible.
OnProgress func(fsize, uploaded int64)
}
// PutRet is the standard reply of a Qiniu upload.
// If an upload callback or a custom returnBody is used, define your own reply struct that matches the actual response.
type PutRet struct {
Hash string `json:"hash"`
PersistentID string `json:"persistentId"`
Key string `json:"key"`
}
// FormUploader performs form uploads
type FormUploader struct {
client *rpc.Client
cfg *Config
}
// NewFormUploader creates a new form uploader
func NewFormUploader(cfg *Config) *FormUploader {
if cfg == nil {
cfg = &Config{}
}
return &FormUploader{
client: &rpc.DefaultClient,
cfg: cfg,
}
}
// NewFormUploaderEx creates a new form uploader with a caller-supplied rpc.Client
func NewFormUploaderEx(cfg *Config, client *rpc.Client) *FormUploader {
if cfg == nil {
cfg = &Config{}
}
if client == nil {
client = &rpc.DefaultClient
}
return &FormUploader{
client: client,
cfg: cfg,
}
}
// PutFile uploads a file through a form upload. It differs from Put only in that the content is read from a local
// file path instead of an io.Reader.
//
// ctx is the request context.
// ret is the data returned on success. If the uptoken sets neither callbackUrl nor returnBody, the reply has the PutRet structure.
// uptoken is the upload token issued by your business server.
// key is the access path of the uploaded file, e.g. "foo/bar.jpg". A leading '/' is discouraged; an empty key is legal.
// localFile is the local path of the file to upload.
// extra holds optional settings and may be nil. See PutExtra for details.
//
func (p *FormUploader) PutFile(
ctx context.Context, ret interface{}, uptoken, key, localFile string, extra *PutExtra) (err error) {
return p.putFile(ctx, ret, uptoken, key, true, localFile, extra)
}
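A self-contained sketch of a form upload with a progress callback and a custom x: parameter; credentials, bucket and paths are placeholders.
```
package main

import (
	"context"
	"fmt"

	"github.com/qiniu/api.v7/auth/qbox"
	"github.com/qiniu/api.v7/storage"
)

func main() {
	mac := qbox.NewMac("your-access-key", "your-secret-key")
	putPolicy := storage.PutPolicy{Scope: "your-bucket"}
	upToken := putPolicy.UploadToken(mac)

	cfg := storage.Config{Zone: &storage.Zone_z0, UseCdnDomains: true}
	formUploader := storage.NewFormUploader(&cfg)

	extra := storage.PutExtra{
		Params: map[string]string{"x:owner": "demo"},
		OnProgress: func(fsize, uploaded int64) {
			fmt.Printf("uploaded %d/%d bytes\n", uploaded, fsize)
		},
	}
	var ret storage.PutRet
	err := formUploader.PutFile(context.Background(), &ret, upToken, "remote.jpg", "/tmp/local.jpg", &extra)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(ret.Key, ret.Hash)
}
```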
// PutFileWithoutKey uploads a file through a form upload without specifying a key. The file is named by the saveKey
// rule in the uptoken if one is set, otherwise by the hash of its content.
// It differs from Put only in that the content is read from a local file path instead of an io.Reader.
//
// ctx is the request context.
// ret is the data returned on success. If the uptoken sets neither CallbackUrl nor ReturnBody, the reply has the PutRet structure.
// uptoken is the upload token issued by your business server.
// localFile is the local path of the file to upload.
// extra holds optional settings and may be nil. See PutExtra for details.
//
func (p *FormUploader) PutFileWithoutKey(
ctx context.Context, ret interface{}, uptoken, localFile string, extra *PutExtra) (err error) {
return p.putFile(ctx, ret, uptoken, "", false, localFile, extra)
}
func (p *FormUploader) putFile(
ctx context.Context, ret interface{}, uptoken string,
key string, hasKey bool, localFile string, extra *PutExtra) (err error) {
f, err := os.Open(localFile)
if err != nil {
return
}
defer f.Close()
fi, err := f.Stat()
if err != nil {
return
}
fsize := fi.Size()
if extra == nil {
extra = &PutExtra{}
}
return p.put(ctx, ret, uptoken, key, hasKey, f, fsize, extra, filepath.Base(localFile))
}
// Put uploads a file through a form upload.
//
// ctx is the request context.
// ret is the data returned on success. If the uptoken sets neither callbackUrl nor returnBody, the reply has the PutRet structure.
// uptoken is the upload token issued by your business server.
// key is the access path of the uploaded file, e.g. "foo/bar.jpg". A leading '/' is discouraged; an empty key is legal.
// data is the file content as an io.Reader.
// size is the size of the file to upload.
// extra holds optional settings and may be nil. See PutExtra for details.
//
func (p *FormUploader) Put(
ctx context.Context, ret interface{}, uptoken, key string, data io.Reader, size int64, extra *PutExtra) (err error) {
err = p.put(ctx, ret, uptoken, key, true, data, size, extra, path.Base(key))
return
}
// PutWithoutKey uploads a file through a form upload without specifying a key. The file is named by the saveKey rule
// in the uptoken if one is set, otherwise by the hash of its content.
//
// ctx is the request context.
// ret is the data returned on success. If the uptoken sets neither CallbackUrl nor ReturnBody, the reply has the PutRet structure.
// uptoken is the upload token issued by your business server.
// data is the file content as an io.Reader.
// size is the size of the file to upload.
// extra holds optional settings. See PutExtra for details.
//
func (p *FormUploader) PutWithoutKey(
ctx context.Context, ret interface{}, uptoken string, data io.Reader, size int64, extra *PutExtra) (err error) {
err = p.put(ctx, ret, uptoken, "", false, data, size, extra, "filename")
return err
}
func (p *FormUploader) put(
ctx context.Context, ret interface{}, uptoken string,
key string, hasKey bool, data io.Reader, size int64, extra *PutExtra, fileName string) (err error) {
ak, bucket, gErr := getAkBucketFromUploadToken(uptoken)
if gErr != nil {
err = gErr
return
}
var upHost string
upHost, err = p.upHost(ak, bucket)
if err != nil {
return
}
var b bytes.Buffer
writer := multipart.NewWriter(&b)
if extra == nil {
extra = &PutExtra{}
}
if extra.OnProgress != nil {
data = &readerWithProgress{reader: data, fsize: size, onProgress: extra.OnProgress}
}
err = writeMultipart(writer, uptoken, key, hasKey, extra, fileName)
if err != nil {
return
}
var dataReader io.Reader
h := crc32.NewIEEE()
dataReader = io.TeeReader(data, h)
crcReader := newCrc32Reader(writer.Boundary(), h)
//write file
head := make(textproto.MIMEHeader)
head.Set("Content-Disposition", fmt.Sprintf(`form-data; name="file"; filename="%s"`,
escapeQuotes(fileName)))
if extra.MimeType != "" {
head.Set("Content-Type", extra.MimeType)
}
_, err = writer.CreatePart(head)
if err != nil {
return
}
lastLine := fmt.Sprintf("\r\n--%s--\r\n", writer.Boundary())
r := strings.NewReader(lastLine)
bodyLen := int64(-1)
if size >= 0 {
bodyLen = int64(b.Len()) + size + int64(len(lastLine))
bodyLen += crcReader.length()
}
mr := io.MultiReader(&b, dataReader, crcReader, r)
contentType := writer.FormDataContentType()
err = p.client.CallWith64(ctx, ret, "POST", upHost, contentType, mr, bodyLen)
if err != nil {
return
}
if extra.OnProgress != nil {
extra.OnProgress(size, size)
}
return
}
type crc32Reader struct {
h hash.Hash32
boundary string
r io.Reader
flag bool
nlDashBoundaryNl string
header string
crc32PadLen int64
}
func newCrc32Reader(boundary string, h hash.Hash32) *crc32Reader {
nlDashBoundaryNl := fmt.Sprintf("\r\n--%s\r\n", boundary)
header := `Content-Disposition: form-data; name="crc32"` + "\r\n\r\n"
return &crc32Reader{
h: h,
boundary: boundary,
nlDashBoundaryNl: nlDashBoundaryNl,
header: header,
crc32PadLen: 10,
}
}
func (r *crc32Reader) Read(p []byte) (int, error) {
if !r.flag {
crc32 := r.h.Sum32()
crc32Line := r.nlDashBoundaryNl + r.header + fmt.Sprintf("%010d", crc32) //padding crc32 results to 10 digits
r.r = strings.NewReader(crc32Line)
r.flag = true
}
return r.r.Read(p)
}
func (r crc32Reader) length() (length int64) {
return int64(len(r.nlDashBoundaryNl+r.header)) + r.crc32PadLen
}
func (p *FormUploader) upHost(ak, bucket string) (upHost string, err error) {
var zone *Zone
if p.cfg.Zone != nil {
zone = p.cfg.Zone
} else {
if v, zoneErr := GetZone(ak, bucket); zoneErr != nil {
err = zoneErr
return
} else {
zone = v
}
}
scheme := "http://"
if p.cfg.UseHTTPS {
scheme = "https://"
}
host := zone.SrcUpHosts[0]
if p.cfg.UseCdnDomains {
host = zone.CdnUpHosts[0]
}
upHost = fmt.Sprintf("%s%s", scheme, host)
return
}
type readerWithProgress struct {
reader io.Reader
uploaded int64
fsize int64
onProgress func(fsize, uploaded int64)
}
func (p *readerWithProgress) Read(b []byte) (n int, err error) {
if p.uploaded > 0 {
p.onProgress(p.fsize, p.uploaded)
}
n, err = p.reader.Read(b)
p.uploaded += int64(n)
return
}
func writeMultipart(writer *multipart.Writer, uptoken, key string, hasKey bool,
extra *PutExtra, fileName string) (err error) {
//token
if err = writer.WriteField("token", uptoken); err != nil {
return
}
//key
if hasKey {
if err = writer.WriteField("key", key); err != nil {
return
}
}
//extra.Params
if extra.Params != nil {
for k, v := range extra.Params {
if strings.HasPrefix(k, "x:") && v != "" {
err = writer.WriteField(k, v)
if err != nil {
return
}
}
}
}
return err
}
var quoteEscaper = strings.NewReplacer("\\", "\\\\", `"`, "\\\"")
func escapeQuotes(s string) string {
return quoteEscaper.Replace(s)
}

View File

@@ -1,31 +0,0 @@
package storage
import (
"context"
"fmt"
"math/rand"
"os"
"path/filepath"
"testing"
)
var (
testLocalFile = filepath.Join(os.Getenv("TRAVIS_BUILD_DIR"), "Makefile")
)
func TestFormUploadPutFile(t *testing.T) {
var putRet PutRet
ctx := context.TODO()
putPolicy := PutPolicy{
Scope: testBucket,
DeleteAfterDays: 7,
}
upToken := putPolicy.UploadToken(mac)
testKey := fmt.Sprintf("testPutFileKey_%d", rand.Int())
err := formUploader.PutFile(ctx, &putRet, upToken, testKey, testLocalFile, nil)
if err != nil {
t.Fatalf("FormUploader#PutFile() error, %s", err)
}
t.Logf("Key: %s, Hash:%s", putRet.Key, putRet.Hash)
}

View File

@@ -1,5 +0,0 @@
package storage
import (
_ "github.com/qiniu/api.v7/conf"
)

View File

@@ -1,206 +0,0 @@
package storage
import (
"context"
"fmt"
"github.com/qiniu/api.v7/auth/qbox"
"github.com/qiniu/x/rpc.v7"
)
// OperationManager provides data-processing (fop) related methods
type OperationManager struct {
client *rpc.Client
mac *qbox.Mac
cfg *Config
}
// NewOperationManager creates a new operation manager for data processing
func NewOperationManager(mac *qbox.Mac, cfg *Config) *OperationManager {
if cfg == nil {
cfg = &Config{}
}
return &OperationManager{
client: NewClient(mac, nil),
mac: mac,
cfg: cfg,
}
}
// NewOperationManagerEx creates a new operation manager with a caller-supplied rpc.Client
func NewOperationManagerEx(mac *qbox.Mac, cfg *Config, client *rpc.Client) *OperationManager {
if cfg == nil {
cfg = &Config{}
}
if client == nil {
client = NewClient(mac, nil)
}
return &OperationManager{
client: client,
mac: mac,
cfg: cfg,
}
}
// PfopRet is the reply of a pfop request
type PfopRet struct {
PersistentID string `json:"persistentId,omitempty"`
}
// PrefopRet is the reply of a processing status query
type PrefopRet struct {
ID string `json:"id"`
Code int `json:"code"`
Desc string `json:"desc"`
InputBucket string `json:"inputBucket,omitempty"`
InputKey string `json:"inputKey,omitempty"`
Pipeline string `json:"pipeline,omitempty"`
Reqid string `json:"reqid,omitempty"`
Items []FopResult
}
func (r *PrefopRet) String() string {
strData := fmt.Sprintf("Id: %s\r\nCode: %d\r\nDesc: %s\r\n", r.ID, r.Code, r.Desc)
if r.InputBucket != "" {
strData += fmt.Sprintln(fmt.Sprintf("InputBucket: %s", r.InputBucket))
}
if r.InputKey != "" {
strData += fmt.Sprintln(fmt.Sprintf("InputKey: %s", r.InputKey))
}
if r.Pipeline != "" {
strData += fmt.Sprintln(fmt.Sprintf("Pipeline: %s", r.Pipeline))
}
if r.Reqid != "" {
strData += fmt.Sprintln(fmt.Sprintf("Reqid: %s", r.Reqid))
}
strData = fmt.Sprintln(strData)
for _, item := range r.Items {
strData += fmt.Sprintf("\tCmd:\t%s\r\n\tCode:\t%d\r\n\tDesc:\t%s\r\n", item.Cmd, item.Code, item.Desc)
if item.Error != "" {
strData += fmt.Sprintf("\tError:\t%s\r\n", item.Error)
} else {
if item.Hash != "" {
strData += fmt.Sprintf("\tHash:\t%s\r\n", item.Hash)
}
if item.Key != "" {
strData += fmt.Sprintf("\tKey:\t%s\r\n", item.Key)
}
if item.Keys != nil {
if len(item.Keys) > 0 {
strData += "\tKeys: {\r\n"
for _, key := range item.Keys {
strData += fmt.Sprintf("\t\t%s\r\n", key)
}
strData += "\t}\r\n"
}
}
}
strData += "\r\n"
}
return strData
}
// FopResult describes the status of one processing operation in the list
type FopResult struct {
Cmd string `json:"cmd"`
Code int `json:"code"`
Desc string `json:"desc"`
Error string `json:"error,omitempty"`
Hash string `json:"hash,omitempty"`
Key string `json:"key,omitempty"`
Keys []string `json:"keys,omitempty"`
}
// Pfop issues a persistent data-processing request.
//
// bucket is the bucket of the source resource.
// key is the key of the source resource.
// fops is the list of processing commands.
// notifyURL is the URL that receives the result notification.
// pipeline is the name of the multimedia processing queue.
// force forces the processing to run even if the result already exists.
//
func (m *OperationManager) Pfop(bucket, key, fops, pipeline, notifyURL string,
force bool) (persistentID string, err error) {
pfopParams := map[string][]string{
"bucket": []string{bucket},
"key": []string{key},
"fops": []string{fops},
}
if pipeline != "" {
pfopParams["pipeline"] = []string{pipeline}
}
if notifyURL != "" {
pfopParams["notifyURL"] = []string{notifyURL}
}
if force {
pfopParams["force"] = []string{"1"}
}
var ret PfopRet
ctx := context.TODO()
reqHost, reqErr := m.apiHost(bucket)
if reqErr != nil {
err = reqErr
return
}
reqURL := fmt.Sprintf("%s/pfop/", reqHost)
err = m.client.CallWithForm(ctx, &ret, "POST", reqURL, pfopParams)
if err != nil {
return
}
persistentID = ret.PersistentID
return
}
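A sketch of submitting a transcoding pfop and polling it with Prefop, reusing mac and cfg from the earlier sketches; the avthumb command, bucket, key and pipeline names are placeholders.
```
operationManager := storage.NewOperationManager(mac, &cfg)
saveEntry := storage.EncodedEntry("your-bucket", "movie_480p.mp4")
fops := "avthumb/mp4/s/480x320/vb/500k|saveas/" + saveEntry
pid, err := operationManager.Pfop("your-bucket", "movie.mp4", fops, "your-pipeline", "", true)
if err != nil {
	fmt.Println(err)
	return
}
// query the job status with the returned persistent id
status, err := operationManager.Prefop(pid)
if err == nil {
	fmt.Print(status.String())
}
```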
// Prefop queries the status of a persistent processing job
func (m *OperationManager) Prefop(persistentID string) (ret PrefopRet, err error) {
ctx := context.TODO()
reqHost := m.prefopApiHost(persistentID)
reqURL := fmt.Sprintf("%s/status/get/prefop?id=%s", reqHost, persistentID)
err = m.client.Call(ctx, &ret, "GET", reqURL)
return
}
func (m *OperationManager) apiHost(bucket string) (apiHost string, err error) {
var zone *Zone
if m.cfg.Zone != nil {
zone = m.cfg.Zone
} else {
if v, zoneErr := GetZone(m.mac.AccessKey, bucket); zoneErr != nil {
err = zoneErr
return
} else {
zone = v
}
}
scheme := "http://"
if m.cfg.UseHTTPS {
scheme = "https://"
}
apiHost = fmt.Sprintf("%s%s", scheme, zone.ApiHost)
return
}
func (m *OperationManager) prefopApiHost(persistentID string) (apiHost string) {
apiHost = "api.qiniu.com"
if m.cfg.Zone != nil {
apiHost = m.cfg.Zone.ApiHost
}
if m.cfg.UseHTTPS {
apiHost = fmt.Sprintf("https://%s", apiHost)
} else {
apiHost = fmt.Sprintf("http://%s", apiHost)
}
return
}

View File

@@ -1,45 +0,0 @@
package storage
import (
"encoding/base64"
"fmt"
"strings"
"testing"
)
var (
testVideoKey = "qiniu.mp4"
)
func TestPrefop(t *testing.T) {
pid := "na0.597802c092129336c20f3f91"
prefopRet, err := operationManager.Prefop(pid)
if err != nil {
t.Fatalf("Prefop() error, %s", err)
}
t.Logf("%s", prefopRet.String())
}
func TestPfop(t *testing.T) {
saveBucket := testBucket
fopAvthumb := fmt.Sprintf("avthumb/mp4/s/480x320/vb/500k|saveas/%s",
EncodedEntry(saveBucket, "pfop_test_qiniu.mp4"))
fopVframe := fmt.Sprintf("vframe/jpg/offset/10|saveas/%s",
EncodedEntry(saveBucket, "pfop_test_qiniu.jpg"))
fopVsample := fmt.Sprintf("vsample/jpg/interval/20/pattern/%s",
base64.URLEncoding.EncodeToString([]byte("pfop_test_$(count).jpg")))
fopBatch := []string{fopAvthumb, fopVframe, fopVsample}
fops := strings.Join(fopBatch, ";")
force := true
notifyURL := ""
pid, err := operationManager.Pfop(testBucket, testVideoKey, fops,
testPipeline, notifyURL, force)
if err != nil {
t.Fatalf("Pfop() error, %s", err)
}
t.Logf("persistentId: %s", pid)
}

View File

@@ -1,200 +0,0 @@
package storage
import (
"context"
"encoding/base64"
"fmt"
"hash/crc32"
"io"
"net/http"
"strconv"
"github.com/qiniu/x/bytes.v7"
"github.com/qiniu/x/rpc.v7"
"github.com/qiniu/x/xlog.v7"
)
// ResumeUploader performs chunked (resumable) uploads
type ResumeUploader struct {
client *rpc.Client
cfg *Config
}
// NewResumeUploader creates a new resume (chunked) uploader
func NewResumeUploader(cfg *Config) *ResumeUploader {
if cfg == nil {
cfg = &Config{}
}
return &ResumeUploader{
cfg: cfg,
client: &rpc.DefaultClient,
}
}
// NewResumeUploaderEx creates a new resume uploader with a caller-supplied rpc.Client
func NewResumeUploaderEx(cfg *Config, client *rpc.Client) *ResumeUploader {
if cfg == nil {
cfg = &Config{}
}
if client == nil {
client = &rpc.DefaultClient
}
return &ResumeUploader{
client: client,
cfg: cfg,
}
}
// uptokenTransport injects the upload token into chunked upload requests
type uptokenTransport struct {
token string
Transport http.RoundTripper
}
func (t *uptokenTransport) NestedObject() interface{} {
return t.Transport
}
func (t *uptokenTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
req.Header.Set("Authorization", t.token)
return t.Transport.RoundTrip(req)
}
func newUptokenTransport(token string, transport http.RoundTripper) *uptokenTransport {
if transport == nil {
transport = http.DefaultTransport
}
return &uptokenTransport{"UpToken " + token, transport}
}
func newUptokenClient(client *rpc.Client, token string) *rpc.Client {
t := newUptokenTransport(token, client.Transport)
client.Transport = t
return client
}
// Mkblk sends the request that creates a new block
func (p *ResumeUploader) Mkblk(
ctx context.Context, upHost string, ret *BlkputRet, blockSize int, body io.Reader, size int) error {
url := upHost + "/mkblk/" + strconv.Itoa(blockSize)
return p.client.CallWith(ctx, ret, "POST", url, "application/octet-stream", body, size)
}
// Bput sends a bput request that appends the next chunk to a block
func (p *ResumeUploader) Bput(
ctx context.Context, ret *BlkputRet, body io.Reader, size int) error {
url := ret.Host + "/bput/" + ret.Ctx + "/" + strconv.FormatUint(uint64(ret.Offset), 10)
return p.client.CallWith(ctx, ret, "POST", url, "application/octet-stream", body, size)
}
// resumableBput uploads a single block chunk by chunk, verifying crc32 and retrying failed chunks
func (p *ResumeUploader) resumableBput(
ctx context.Context, upHost string, ret *BlkputRet, f io.ReaderAt, blkIdx, blkSize int, extra *RputExtra) (err error) {
log := xlog.NewWith(ctx)
h := crc32.NewIEEE()
offbase := int64(blkIdx) << blockBits
chunkSize := extra.ChunkSize
var bodyLength int
if ret.Ctx == "" {
if chunkSize < blkSize {
bodyLength = chunkSize
} else {
bodyLength = blkSize
}
body1 := io.NewSectionReader(f, offbase, int64(bodyLength))
body := io.TeeReader(body1, h)
err = p.Mkblk(ctx, upHost, ret, blkSize, body, bodyLength)
if err != nil {
return
}
if ret.Crc32 != h.Sum32() || int(ret.Offset) != bodyLength {
err = ErrUnmatchedChecksum
return
}
extra.Notify(blkIdx, blkSize, ret)
}
for int(ret.Offset) < blkSize {
if chunkSize < blkSize-int(ret.Offset) {
bodyLength = chunkSize
} else {
bodyLength = blkSize - int(ret.Offset)
}
tryTimes := extra.TryTimes
lzRetry:
h.Reset()
body1 := io.NewSectionReader(f, offbase+int64(ret.Offset), int64(bodyLength))
body := io.TeeReader(body1, h)
err = p.Bput(ctx, ret, body, bodyLength)
if err == nil {
if ret.Crc32 == h.Sum32() {
extra.Notify(blkIdx, blkSize, ret)
continue
}
log.Warn("ResumableBlockput: invalid checksum, retry")
err = ErrUnmatchedChecksum
} else {
if ei, ok := err.(*rpc.ErrorInfo); ok && ei.Code == InvalidCtx {
ret.Ctx = "" // reset
log.Warn("ResumableBlockput: invalid ctx, please retry")
return
}
log.Warn("ResumableBlockput: bput failed -", err)
}
if tryTimes > 1 {
tryTimes--
log.Info("ResumableBlockput retrying ...")
goto lzRetry
}
break
}
return
}
// Mkfile sends the request that assembles the uploaded blocks into a file
func (p *ResumeUploader) Mkfile(
ctx context.Context, upHost string, ret interface{}, key string, hasKey bool, fsize int64, extra *RputExtra) (err error) {
url := upHost + "/mkfile/" + strconv.FormatInt(fsize, 10)
if extra.MimeType != "" {
url += "/mimeType/" + encode(extra.MimeType)
}
if hasKey {
url += "/key/" + encode(key)
}
for k, v := range extra.Params {
url += fmt.Sprintf("/%s/%s", k, encode(v))
}
buf := make([]byte, 0, 196*len(extra.Progresses))
for _, prog := range extra.Progresses {
buf = append(buf, prog.Ctx...)
buf = append(buf, ',')
}
if len(buf) > 0 {
buf = buf[:len(buf)-1]
}
return p.client.CallWith(
ctx, ret, "POST", url, "application/octet-stream", bytes.NewReader(buf), len(buf))
}
func encode(raw string) string {
return base64.URLEncoding.EncodeToString([]byte(raw))
}

View File

@@ -1,311 +0,0 @@
package storage
import (
"context"
"errors"
"fmt"
"io"
"os"
"sync"
"github.com/qiniu/x/xlog.v7"
)
// Errors that may be returned during a chunked upload
var (
ErrInvalidPutProgress = errors.New("invalid put progress")
ErrPutFailed = errors.New("resumable put failed")
ErrUnmatchedChecksum = errors.New("unmatched checksum")
ErrBadToken = errors.New("invalid token")
)
// Error code for an expired upload context
const (
InvalidCtx = 701 // UP: invalid context for bput; the ctx is malformed or has expired after being unused for too long
)
// Default parameters of the chunked upload
const (
defaultWorkers = 4 // default number of blocks uploaded concurrently
defaultChunkSize = 4 * 1024 * 1024 // default chunk size, 4MB
defaultTryTimes = 3 // number of retries after a failed bput
)
// Settings configures the chunked upload
type Settings struct {
TaskQsize int // Optional. Size of the task queue; 0 means Workers * 4.
Workers int // Number of goroutines uploading blocks in parallel.
ChunkSize int // Chunk size; defaults to 4MB when unset.
TryTimes int // Number of retries; defaults to 3 when unset.
}
// Default settings used for chunked uploads
var settings = Settings{
TaskQsize: defaultWorkers * 4,
Workers: defaultWorkers,
ChunkSize: defaultChunkSize,
TryTimes: defaultTryTimes,
}
// SetSettings overrides the chunked-upload settings
func SetSettings(v *Settings) {
settings = *v
if settings.Workers == 0 {
settings.Workers = defaultWorkers
}
if settings.TaskQsize == 0 {
settings.TaskQsize = settings.Workers * 4
}
if settings.ChunkSize == 0 {
settings.ChunkSize = defaultChunkSize
}
if settings.TryTimes == 0 {
settings.TryTimes = defaultTryTimes
}
}
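A sketch of tuning the global chunked-upload settings before any ResumeUploader call; the numbers are illustrative, and TaskQsize is left at 0 so it defaults to Workers * 4.
```
storage.SetSettings(&storage.Settings{
	Workers:   8,               // upload 8 blocks in parallel
	ChunkSize: 2 * 1024 * 1024, // 2MB bput chunks inside each 4MB block
	TryTimes:  5,               // retry a failed chunk up to 5 times
})
```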
var tasks chan func()
func worker(tasks chan func()) {
for {
task := <-tasks
task()
}
}
func initWorkers() {
tasks = make(chan func(), settings.TaskQsize)
for i := 0; i < settings.Workers; i++ {
go worker(tasks)
}
}
// Default no-op callbacks invoked after a block finishes uploading
func notifyNil(blkIdx int, blkSize int, ret *BlkputRet) {}
func notifyErrNil(blkIdx int, blkSize int, err error) {}
const (
blockBits = 22
blockMask = (1 << blockBits) - 1
)
// BlockCount computes the number of blocks (4MB each) a file is split into
func BlockCount(fsize int64) int {
return int((fsize + blockMask) >> blockBits)
}
// BlkputRet is the reply returned after each chunk of a block is uploaded
type BlkputRet struct {
Ctx string `json:"ctx"`
Checksum string `json:"checksum"`
Crc32 uint32 `json:"crc32"`
Offset uint32 `json:"offset"`
Host string `json:"host"`
ExpiredAt int64 `json:"expired_at"`
}
// RputExtra holds the optional parameters of a chunked upload
type RputExtra struct {
Params map[string]string // Optional. User-defined parameters; keys must start with "x:" and values must be non-empty, otherwise they are ignored.
MimeType string // Optional.
ChunkSize int // Optional. Chunk size of each bput request.
TryTimes int // Optional. Number of retries.
Progresses []BlkputRet // Optional. Upload progress, used for resuming.
Notify func(blkIdx int, blkSize int, ret *BlkputRet) // Optional. Progress notification; note that blocks are uploaded in parallel.
NotifyErr func(blkIdx int, blkSize int, err error)
}
var once sync.Once
// Put uploads a file with chunked upload and resuming support.
//
// ctx is the request context.
// ret is the data returned on success. If the uptoken sets neither CallbackUrl nor ReturnBody, the reply has the PutRet structure.
// uptoken is the upload token issued by your business server.
// key is the access path of the uploaded file, e.g. "foo/bar.jpg". A leading '/' is discouraged; an empty key is legal.
// f is the file content. Chunked upload and resuming require an io.ReaderAt rather than an io.Reader.
// fsize is the size of the file to upload.
// extra holds optional settings. See RputExtra for details.
//
func (p *ResumeUploader) Put(ctx context.Context, ret interface{}, uptoken string, key string, f io.ReaderAt,
fsize int64, extra *RputExtra) (err error) {
err = p.rput(ctx, ret, uptoken, key, true, f, fsize, extra)
return
}
// PutWithoutKey uploads a file with chunked upload and resuming support. The file is named by the saveKey rule in
// the uptoken if one is set, otherwise by the hash of its content.
//
// ctx is the request context.
// ret is the data returned on success. If the uptoken sets neither CallbackUrl nor ReturnBody, the reply has the PutRet structure.
// uptoken is the upload token issued by your business server.
// f is the file content. Chunked upload and resuming require an io.ReaderAt rather than an io.Reader.
// fsize is the size of the file to upload.
// extra holds optional settings. See RputExtra for details.
//
func (p *ResumeUploader) PutWithoutKey(
ctx context.Context, ret interface{}, uptoken string, f io.ReaderAt, fsize int64, extra *RputExtra) (err error) {
err = p.rput(ctx, ret, uptoken, "", false, f, fsize, extra)
return
}
// PutFile uploads a file with chunked upload and resuming support.
// It differs from Put only in that the content is read from a local file path instead of an io.ReaderAt.
//
// ctx is the request context.
// ret is the data returned on success. If the uptoken sets neither CallbackUrl nor ReturnBody, the reply has the PutRet structure.
// uptoken is the upload token issued by your business server.
// key is the access path of the uploaded file, e.g. "foo/bar.jpg". A leading '/' is discouraged; an empty key is legal.
// localFile is the local path of the file to upload.
// extra holds optional settings. See RputExtra for details.
//
func (p *ResumeUploader) PutFile(
ctx context.Context, ret interface{}, uptoken, key, localFile string, extra *RputExtra) (err error) {
err = p.rputFile(ctx, ret, uptoken, key, true, localFile, extra)
return
}
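A sketch of a chunked upload with a per-block progress callback, reusing upToken and cfg from the form-upload sketch; the key and local path are placeholders.
```
resumeUploader := storage.NewResumeUploader(&cfg)
extra := storage.RputExtra{
	Notify: func(blkIdx, blkSize int, ret *storage.BlkputRet) {
		fmt.Printf("block %d done, offset %d\n", blkIdx, ret.Offset)
	},
}
var ret storage.PutRet
err := resumeUploader.PutFile(context.Background(), &ret, upToken, "big-file.zip", "/tmp/big-file.zip", &extra)
if err != nil {
	fmt.Println(err)
	return
}
fmt.Println(ret.Key, ret.Hash)
```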
// PutFileWithoutKey uploads a file with chunked upload and resuming support. The file is named by the saveKey rule
// in the uptoken if one is set, otherwise by the hash of its content.
// It differs from PutWithoutKey only in that the content is read from a local file path instead of an io.ReaderAt.
//
// ctx is the request context.
// ret is the data returned on success. If the uptoken sets neither CallbackUrl nor ReturnBody, the reply has the PutRet structure.
// uptoken is the upload token issued by your business server.
// localFile is the local path of the file to upload.
// extra holds optional settings. See RputExtra for details.
//
func (p *ResumeUploader) PutFileWithoutKey(
ctx context.Context, ret interface{}, uptoken, localFile string, extra *RputExtra) (err error) {
return p.rputFile(ctx, ret, uptoken, "", false, localFile, extra)
}
func (p *ResumeUploader) rput(
ctx context.Context, ret interface{}, uptoken string,
key string, hasKey bool, f io.ReaderAt, fsize int64, extra *RputExtra) (err error) {
once.Do(initWorkers)
log := xlog.NewWith(ctx)
blockCnt := BlockCount(fsize)
if extra == nil {
extra = new(RputExtra)
}
if extra.Progresses == nil {
extra.Progresses = make([]BlkputRet, blockCnt)
} else if len(extra.Progresses) != blockCnt {
return ErrInvalidPutProgress
}
if extra.ChunkSize == 0 {
extra.ChunkSize = settings.ChunkSize
}
if extra.TryTimes == 0 {
extra.TryTimes = settings.TryTimes
}
if extra.Notify == nil {
extra.Notify = notifyNil
}
if extra.NotifyErr == nil {
extra.NotifyErr = notifyErrNil
}
//get up host
ak, bucket, gErr := getAkBucketFromUploadToken(uptoken)
if gErr != nil {
err = gErr
return
}
upHost, gErr := p.upHost(ak, bucket)
if gErr != nil {
err = gErr
return
}
var wg sync.WaitGroup
wg.Add(blockCnt)
last := blockCnt - 1
blkSize := 1 << blockBits
nfails := 0
p.client = newUptokenClient(p.client, uptoken)
for i := 0; i < blockCnt; i++ {
blkIdx := i
blkSize1 := blkSize
if i == last {
offbase := int64(blkIdx) << blockBits
blkSize1 = int(fsize - offbase)
}
task := func() {
defer wg.Done()
tryTimes := extra.TryTimes
lzRetry:
err := p.resumableBput(ctx, upHost, &extra.Progresses[blkIdx], f, blkIdx, blkSize1, extra)
if err != nil {
if tryTimes > 1 {
tryTimes--
log.Info("resumable.Put retrying ...", blkIdx, "reason:", err)
goto lzRetry
}
log.Warn("resumable.Put", blkIdx, "failed:", err)
extra.NotifyErr(blkIdx, blkSize1, err)
nfails++
}
}
tasks <- task
}
wg.Wait()
if nfails != 0 {
return ErrPutFailed
}
return p.Mkfile(ctx, upHost, ret, key, hasKey, fsize, extra)
}
func (p *ResumeUploader) rputFile(
ctx context.Context, ret interface{}, uptoken string,
key string, hasKey bool, localFile string, extra *RputExtra) (err error) {
f, err := os.Open(localFile)
if err != nil {
return
}
defer f.Close()
fi, err := f.Stat()
if err != nil {
return
}
return p.rput(ctx, ret, uptoken, key, hasKey, f, fi.Size(), extra)
}
func (p *ResumeUploader) upHost(ak, bucket string) (upHost string, err error) {
var zone *Zone
if p.cfg.Zone != nil {
zone = p.cfg.Zone
} else {
if v, zoneErr := GetZone(ak, bucket); zoneErr != nil {
err = zoneErr
return
} else {
zone = v
}
}
scheme := "http://"
if p.cfg.UseHTTPS {
scheme = "https://"
}
host := zone.SrcUpHosts[0]
if p.cfg.UseCdnDomains {
host = zone.CdnUpHosts[0]
}
upHost = fmt.Sprintf("%s%s", scheme, host)
return
}

View File

@@ -1,25 +0,0 @@
package storage
import (
"context"
"fmt"
"math/rand"
"testing"
)
func TestResumeUploadPutFile(t *testing.T) {
var putRet PutRet
ctx := context.TODO()
putPolicy := PutPolicy{
Scope: testBucket,
DeleteAfterDays: 7,
}
upToken := putPolicy.UploadToken(mac)
testKey := fmt.Sprintf("testRPutFileKey_%d", rand.Int())
err := resumeUploader.PutFile(ctx, &putRet, upToken, testKey, testLocalFile, nil)
if err != nil {
t.Fatalf("ResumeUploader#PutFile() error, %s", err)
}
t.Logf("Key: %s, Hash:%s", putRet.Key, putRet.Hash)
}

View File

@@ -1,73 +0,0 @@
package storage
import (
"encoding/base64"
"encoding/json"
"errors"
"strings"
"time"
"github.com/qiniu/api.v7/auth/qbox"
)
// PutPolicy is the upload policy of a file
type PutPolicy struct {
Scope string `json:"scope"`
Expires uint32 `json:"deadline"` // validity period in seconds; UploadToken converts it to an absolute deadline
IsPrefixalScope int `json:"isPrefixalScope,omitempty"`
InsertOnly uint16 `json:"insertOnly,omitempty"` // if non-zero, the upload is insert-only even when Scope has the Bucket:Key form
DetectMime uint8 `json:"detectMime,omitempty"` // if non-zero, the server detects the MimeType from the content
FsizeLimit int64 `json:"fsizeLimit,omitempty"`
MimeLimit string `json:"mimeLimit,omitempty"`
SaveKey string `json:"saveKey,omitempty"`
CallbackFetchKey uint8 `json:"callbackFetchKey,omitempty"`
CallbackURL string `json:"callbackUrl,omitempty"`
CallbackHost string `json:"callbackHost,omitempty"`
CallbackBody string `json:"callbackBody,omitempty"`
CallbackBodyType string `json:"callbackBodyType,omitempty"`
ReturnURL string `json:"returnUrl,omitempty"`
ReturnBody string `json:"returnBody,omitempty"`
PersistentOps string `json:"persistentOps,omitempty"`
PersistentNotifyURL string `json:"persistentNotifyUrl,omitempty"`
PersistentPipeline string `json:"persistentPipeline,omitempty"`
EndUser string `json:"endUser,omitempty"`
DeleteAfterDays int `json:"deleteAfterDays,omitempty"`
FileType int `json:"fileType,omitempty"`
}
// UploadToken generates the upload token from the policy
func (p *PutPolicy) UploadToken(mac *qbox.Mac) (token string) {
if p.Expires == 0 {
p.Expires = 3600 // 1 hour
}
p.Expires += uint32(time.Now().Unix())
putPolicyJSON, _ := json.Marshal(p)
token = mac.SignWithData(putPolicyJSON)
return
}
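A sketch of a stricter upload policy; Scope in the bucket:key form plus InsertOnly restricts the token to creating that one key, and the ReturnBody placeholders ($(key), $(etag), $(fsize)) are assumed to be the usual Qiniu magic variables, shown here only as an illustration.
```
putPolicy := storage.PutPolicy{
	Scope:      "your-bucket:remote.jpg",
	InsertOnly: 1,
	Expires:    7200, // validity in seconds; defaults to 3600 when left at 0
	ReturnBody: `{"key":"$(key)","hash":"$(etag)","size":$(fsize)}`,
}
upToken := putPolicy.UploadToken(mac)
fmt.Println(upToken)
```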
func getAkBucketFromUploadToken(token string) (ak, bucket string, err error) {
items := strings.Split(token, ":")
if len(items) != 3 {
err = errors.New("invalid upload token, format error")
return
}
ak = items[0]
policyBytes, dErr := base64.URLEncoding.DecodeString(items[2])
if dErr != nil {
err = errors.New("invalid upload token, invalid put policy")
return
}
putPolicy := PutPolicy{}
uErr := json.Unmarshal(policyBytes, &putPolicy)
if uErr != nil {
err = errors.New("invalid upload token, invalid put policy")
return
}
bucket = strings.Split(putPolicy.Scope, ":")[0]
return
}

View File

@@ -1,22 +0,0 @@
package storage
import (
"time"
)
// ParsePutTime converts a PutTime value (in units of 100 nanoseconds) into a time.Time
func ParsePutTime(putTime int64) (t time.Time) {
t = time.Unix(0, putTime*100)
return
}
// IsContextExpired reports whether the ctx of a chunked upload has expired. The ctx is treated as expired one day
// early, because resuming an interrupted upload is assumed to take at most one day.
func IsContextExpired(blkPut BlkputRet) bool {
if blkPut.Ctx == "" {
return false
}
target := time.Unix(blkPut.ExpiredAt, 0).AddDate(0, 0, -1)
now := time.Now()
return now.After(target)
}
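A sketch showing the PutTime conversion, reusing the bucketManager from the earlier sketch; PutTime is reported in units of 100 nanoseconds.
```
info, err := bucketManager.Stat("your-bucket", "qiniu.png")
if err == nil {
	fmt.Println("uploaded at:", storage.ParsePutTime(info.PutTime))
}
```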

View File

@@ -1,190 +0,0 @@
package storage
import (
"context"
"fmt"
"strings"
"sync"
"github.com/qiniu/x/rpc.v7"
)
// Zone describes the region of a bucket, mainly the hosts used for uploads and resource management
type Zone struct {
SrcUpHosts []string
CdnUpHosts []string
RsHost string
RsfHost string
ApiHost string
IovipHost string
}
func (z *Zone) String() string {
str := ""
str += fmt.Sprintf("SrcUpHosts: %v\n", z.SrcUpHosts)
str += fmt.Sprintf("CdnUpHosts: %v\n", z.CdnUpHosts)
str += fmt.Sprintf("IovipHost: %s\n", z.IovipHost)
str += fmt.Sprintf("RsHost: %s\n", z.RsHost)
str += fmt.Sprintf("RsfHost: %s\n", z.RsfHost)
str += fmt.Sprintf("ApiHost: %s\n", z.ApiHost)
return str
}
// ZoneHuadong is the East China region
var ZoneHuadong = Zone{
SrcUpHosts: []string{
"up.qiniup.com",
"up-nb.qiniup.com",
"up-xs.qiniup.com",
},
CdnUpHosts: []string{
"upload.qiniup.com",
"upload-nb.qiniup.com",
"upload-xs.qiniup.com",
},
RsHost: "rs.qiniu.com",
RsfHost: "rsf.qiniu.com",
ApiHost: "api.qiniu.com",
IovipHost: "iovip.qbox.me",
}
// ZoneHuabei is the North China region
var ZoneHuabei = Zone{
SrcUpHosts: []string{
"up-z1.qiniup.com",
},
CdnUpHosts: []string{
"upload-z1.qiniup.com",
},
RsHost: "rs-z1.qiniu.com",
RsfHost: "rsf-z1.qiniu.com",
ApiHost: "api-z1.qiniu.com",
IovipHost: "iovip-z1.qbox.me",
}
// ZoneHuanan is the South China region
var ZoneHuanan = Zone{
SrcUpHosts: []string{
"up-z2.qiniup.com",
"up-gz.qiniup.com",
"up-fs.qiniup.com",
},
CdnUpHosts: []string{
"upload-z2.qiniup.com",
"upload-gz.qiniup.com",
"upload-fs.qiniup.com",
},
RsHost: "rs-z2.qiniu.com",
RsfHost: "rsf-z2.qiniu.com",
ApiHost: "api-z2.qiniu.com",
IovipHost: "iovip-z2.qbox.me",
}
// ZoneBeimei is the North America region
var ZoneBeimei = Zone{
SrcUpHosts: []string{
"up-na0.qiniu.com",
},
CdnUpHosts: []string{
"upload-na0.qiniu.com",
},
RsHost: "rs-na0.qiniu.com",
RsfHost: "rsf-na0.qiniu.com",
ApiHost: "api-na0.qiniu.com",
IovipHost: "iovip-na0.qbox.me",
}
// for programmers
var Zone_z0 = ZoneHuadong
var Zone_z1 = ZoneHuabei
var Zone_z2 = ZoneHuanan
var Zone_na0 = ZoneBeimei
// UcHost is the API endpoint for querying the hosts associated with a bucket
const UcHost = "https://uc.qbox.me"
// UcQueryRet is the reply of a uc query request
type UcQueryRet struct {
TTL int `json:"ttl"`
Io map[string]map[string][]string `json:"io"`
Up map[string]UcQueryUp `json:"up"`
}
// UcQueryUp holds the upload host information in a uc query reply
type UcQueryUp struct {
Main []string `json:"main,omitempty"`
Backup []string `json:"backup,omitempty"`
Info string `json:"info,omitempty"`
}
var (
zoneMutext sync.RWMutex
zoneCache = make(map[string]*Zone)
)
// GetZone looks up the zone (region) information of a bucket by access key and bucket name
func GetZone(ak, bucket string) (zone *Zone, err error) {
zoneID := fmt.Sprintf("%s:%s", ak, bucket)
//check from cache
zoneMutext.RLock()
if v, ok := zoneCache[zoneID]; ok {
zone = v
}
zoneMutext.RUnlock()
if zone != nil {
return
}
//query from server
reqURL := fmt.Sprintf("%s/v2/query?ak=%s&bucket=%s", UcHost, ak, bucket)
var ret UcQueryRet
ctx := context.Background()
qErr := rpc.DefaultClient.CallWithForm(ctx, &ret, "GET", reqURL, nil)
if qErr != nil {
err = fmt.Errorf("query zone error, %s", qErr.Error())
return
}
ioHost := ret.Io["src"]["main"][0]
srcUpHosts := ret.Up["src"].Main
if ret.Up["src"].Backup != nil {
srcUpHosts = append(srcUpHosts, ret.Up["src"].Backup...)
}
cdnUpHosts := ret.Up["acc"].Main
if ret.Up["acc"].Backup != nil {
cdnUpHosts = append(cdnUpHosts, ret.Up["acc"].Backup...)
}
zone = &Zone{
SrcUpHosts: srcUpHosts,
CdnUpHosts: cdnUpHosts,
IovipHost: ioHost,
RsHost: DefaultRsHost,
RsfHost: DefaultRsfHost,
ApiHost: DefaultAPIHost,
}
//set specific hosts if possible
setSpecificHosts(ioHost, zone)
zoneMutext.Lock()
zoneCache[zoneID] = zone
zoneMutext.Unlock()
return
}
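A sketch of resolving a bucket's zone explicitly; repeated calls for the same ak/bucket pair are served from the in-memory cache shown above.
```
zone, err := storage.GetZone("your-access-key", "your-bucket")
if err != nil {
	fmt.Println(err)
	return
}
fmt.Print(zone.String())
```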
func setSpecificHosts(ioHost string, zone *Zone) {
if strings.Contains(ioHost, "-z1") {
zone.RsHost = "rs-z1.qiniu.com"
zone.RsfHost = "rsf-z1.qiniu.com"
zone.ApiHost = "api-z1.qiniu.com"
} else if strings.Contains(ioHost, "-z2") {
zone.RsHost = "rs-z2.qiniu.com"
zone.RsfHost = "rsf-z2.qiniu.com"
zone.ApiHost = "api-z2.qiniu.com"
} else if strings.Contains(ioHost, "-na0") {
zone.RsHost = "rs-na0.qiniu.com"
zone.RsfHost = "rsf-na0.qiniu.com"
zone.ApiHost = "api-na0.qiniu.com"
}
}

View File

@@ -1,10 +0,0 @@
DIR=$(cd ../; pwd)
export GOPATH=$DIR:$GOPATH
export QINIU_ACCESS_KEY=ak
export QINIU_SECRET_KEY=sk
export QINIU_TEST_BUCKET=gosdk
export QINIU_TEST_BUCKET_PRIVATE=gosdk.qiniudn.com
export QINIU_TEST_DOMAIN=gosdk.qiniudn.com
export QINIU_TEST_DOMAIN_PRIVATE=gosdk.qiniudn.com
export QINIU_TEST_PIPELINE=sdktest
export TRAVIS_BUILD_DIR=/Users/jemy/Downloads

vendor/github.com/qiniu/x/.gitignore generated vendored
View File

@@ -1,24 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof

View File

@@ -1,10 +0,0 @@
language: go
install:
- export QINIU_SRC=$HOME/gopath/src
- mkdir -p $QINIU_SRC/qiniupkg.com
- mv $QINIU_SRC/github.com/qiniu/x $QINIU_SRC/qiniupkg.com/x
- export TRAVIS_BUILD_DIR=$QINIU_SRC/qiniupkg.com/x
- cd $TRAVIS_BUILD_DIR
- go get golang.org/x/net/context
- go get github.com/stretchr/testify/assert

vendor/github.com/qiniu/x/README.md generated vendored
View File

@@ -1,27 +0,0 @@
qiniupkg.com/x
===============
[![Build Status](https://travis-ci.org/qiniu/x.svg?branch=develop)](https://travis-ci.org/qiniu/x) [![GoDoc](https://godoc.org/qiniupkg.com/x?status.svg)](https://godoc.org/qiniupkg.com/x)
# Download
```
go get qiniupkg.com/x
```
# Documentation
* [qiniupkg.com/x/bytes.v7](http://godoc.org/qiniupkg.com/x/bytes.v7)
* [qiniupkg.com/x/cmdline.v7](http://godoc.org/qiniupkg.com/x/cmdline.v7)
* [qiniupkg.com/x/config.v7](http://godoc.org/qiniupkg.com/x/config.v7)
* [qiniupkg.com/x/ctype.v7](http://godoc.org/qiniupkg.com/x/ctype.v7)
* [qiniupkg.com/x/jsonutil.v7](http://godoc.org/qiniupkg.com/x/jsonutil.v7)
* [qiniupkg.com/x/log.v7](http://godoc.org/qiniupkg.com/x/log.v7)
* [qiniupkg.com/x/mockhttp.v7](http://godoc.org/qiniupkg.com/x/mockhttp.v7)
* [qiniupkg.com/x/reqid.v7](http://godoc.org/qiniupkg.com/x/reqid.v7)
* [qiniupkg.com/x/rpc.v7](http://godoc.org/qiniupkg.com/x/rpc.v7)
* [qiniupkg.com/x/rpc.v7/gob](http://godoc.org/qiniupkg.com/x/rpc.v7/gob)
* [qiniupkg.com/x/ts.v7](http://godoc.org/qiniupkg.com/x/ts.v7)
* [qiniupkg.com/x/url.v7](http://godoc.org/qiniupkg.com/x/url.v7)
* [qiniupkg.com/x/xlog.v7](http://godoc.org/qiniupkg.com/x/xlog.v7)

View File

@@ -1,4 +0,0 @@
qiniupkg.com/x/bytes.v7
=====
Extension module of golang bytes processing

View File

@@ -1,177 +0,0 @@
package bytes
import (
"io"
"syscall"
)
// ---------------------------------------------------
type Reader struct {
b []byte
off int
}
func NewReader(val []byte) *Reader {
return &Reader{val, 0}
}
func (r *Reader) Len() int {
if r.off >= len(r.b) {
return 0
}
return len(r.b) - r.off
}
func (r *Reader) Bytes() []byte {
return r.b[r.off:]
}
func (r *Reader) SeekToBegin() (err error) {
r.off = 0
return
}
func (r *Reader) Seek(offset int64, whence int) (ret int64, err error) {
switch whence {
case 0:
case 1:
offset += int64(r.off)
case 2:
offset += int64(len(r.b))
default:
err = syscall.EINVAL
return
}
if offset < 0 {
err = syscall.EINVAL
return
}
if offset >= int64(len(r.b)) {
r.off = len(r.b)
} else {
r.off = int(offset)
}
ret = int64(r.off)
return
}
func (r *Reader) Read(val []byte) (n int, err error) {
n = copy(val, r.b[r.off:])
if n == 0 && len(val) != 0 {
err = io.EOF
return
}
r.off += n
return
}
func (r *Reader) Close() (err error) {
return
}
// ---------------------------------------------------
type Writer struct {
b []byte
n int
}
func NewWriter(buff []byte) *Writer {
return &Writer{buff, 0}
}
func (p *Writer) Write(val []byte) (n int, err error) {
n = copy(p.b[p.n:], val)
if n == 0 && len(val) > 0 {
err = io.EOF
return
}
p.n += n
return
}
func (p *Writer) Len() int {
return p.n
}
func (p *Writer) Bytes() []byte {
return p.b[:p.n]
}
func (p *Writer) Reset() {
p.n = 0
}
// ---------------------------------------------------
type Buffer struct {
b []byte
}
func NewBuffer() *Buffer {
return new(Buffer)
}
func (p *Buffer) ReadAt(buf []byte, off int64) (n int, err error) {
ioff := int(off)
if len(p.b) <= ioff {
return 0, io.EOF
}
n = copy(buf, p.b[ioff:])
if n != len(buf) {
err = io.EOF
}
return
}
func (p *Buffer) WriteAt(buf []byte, off int64) (n int, err error) {
ioff := int(off)
iend := ioff + len(buf)
if len(p.b) < iend {
if len(p.b) == ioff {
p.b = append(p.b, buf...)
return len(buf), nil
}
zero := make([]byte, iend-len(p.b))
p.b = append(p.b, zero...)
}
copy(p.b[ioff:], buf)
return len(buf), nil
}
func (p *Buffer) WriteStringAt(buf string, off int64) (n int, err error) {
ioff := int(off)
iend := ioff + len(buf)
if len(p.b) < iend {
if len(p.b) == ioff {
p.b = append(p.b, buf...)
return len(buf), nil
}
zero := make([]byte, iend-len(p.b))
p.b = append(p.b, zero...)
}
copy(p.b[ioff:], buf)
return len(buf), nil
}
func (p *Buffer) Truncate(fsize int64) (err error) {
size := int(fsize)
if len(p.b) < size {
zero := make([]byte, size-len(p.b))
p.b = append(p.b, zero...)
} else {
p.b = p.b[:size]
}
return nil
}
func (p *Buffer) Buffer() []byte {
return p.b
}
func (p *Buffer) Len() int {
return len(p.b)
}
// ---------------------------------------------------

View File

@@ -1,60 +0,0 @@
package bytes
import (
"io"
"testing"
)
// ---------------------------------------------------
func TestBuffer(t *testing.T) {
b := NewBuffer()
n, err := b.WriteStringAt("Hello", 4)
if n != 5 || err != nil {
t.Fatal("WriteStringAt failed:", n, err)
}
if b.Len() != 9 {
t.Fatal("Buffer.Len invalid (9 is required):", b.Len())
}
buf := make([]byte, 10)
n, err = b.ReadAt(buf, 50)
if n != 0 || err != io.EOF {
t.Fatal("ReadAt failed:", n, err)
}
n, err = b.ReadAt(buf, 6)
if n != 3 || err != io.EOF || string(buf[:n]) != "llo" {
t.Fatal("ReadAt failed:", n, err, string(buf[:n]))
}
n, err = b.WriteAt([]byte("Hi h"), 1)
if n != 4 || err != nil {
t.Fatal("WriteAt failed:", n, err)
}
if b.Len() != 9 {
t.Fatal("Buffer.Len invalid (9 is required):", b.Len())
}
n, err = b.ReadAt(buf, 0)
if n != 9 || err != io.EOF || string(buf[:n]) != "\x00Hi hello" {
t.Fatal("ReadAt failed:", n, err)
}
n, err = b.WriteStringAt("LO world!", 7)
if n != 9 || err != nil {
t.Fatal("WriteStringAt failed:", n, err)
}
if b.Len() != 16 {
t.Fatal("Buffer.Len invalid (16 is required):", b.Len())
}
buf = make([]byte, 17)
n, err = b.ReadAt(buf, 0)
if n != 16 || err != io.EOF || string(buf[:n]) != "\x00Hi helLO world!" {
t.Fatal("ReadAt failed:", n, err, string(buf[:n]))
}
}
// ---------------------------------------------------

View File

@@ -1,34 +0,0 @@
/*
Package qiniupkg.com/x/bytes.v7 provides extensions for working with byte slices.
NewReader creates a read-only stream over a byte slice:
var slice []byte
...
r := bytes.NewReader(slice)
...
r.Seek(0, 0) // r.SeekToBegin()
...
Unlike the standard library's bytes.NewReader, this Reader supports Seek.
NewWriter creates a write stream with a bounded capacity:
slice := make([]byte, 1024)
w := bytes.NewWriter(slice)
...
writtenData := w.Bytes()
If more than 1024 bytes are written to w, the excess data is discarded.
NewBuffer creates a random-access in-memory file that supports ReadAt/WriteAt instead of Read/Write:
b := bytes.NewBuffer()
b.Truncate(100)
b.WriteAt([]byte("hello"), 100)
slice := make([]byte, 105)
n, err := b.ReadAt(slice, 0)
...
*/
package bytes
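A minimal usage sketch of the Reader/Writer/Buffer API documented above (illustrative only; the import path follows the qiniupkg.com/x layout used in these vendored sources):
```
package main

import (
	"fmt"

	"qiniupkg.com/x/bytes.v7"
)

func main() {
	// Reader: a seekable, read-only stream over a byte slice.
	r := bytes.NewReader([]byte("hello"))
	buf := make([]byte, 3)
	n, _ := r.Read(buf)
	fmt.Println(string(buf[:n])) // hel
	r.SeekToBegin()              // rewind so the data can be read again

	// Writer: writes beyond the backing slice's length are silently dropped.
	w := bytes.NewWriter(make([]byte, 4))
	w.Write([]byte("hello"))
	fmt.Println(string(w.Bytes())) // hell

	// Buffer: a random-access in-memory file (ReadAt/WriteAt).
	b := bytes.NewBuffer()
	b.WriteAt([]byte("world"), 2)
	out := make([]byte, b.Len())
	b.ReadAt(out, 0)
	fmt.Printf("%q\n", out) // "\x00\x00world"
}
```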

View File

@@ -1,54 +0,0 @@
package bytes
import (
"bytes"
)
// ---------------------------------------------------
func ReplaceAt(b []byte, off, nsrc int, dest []byte) []byte {
ndelta := len(dest) - nsrc
if ndelta < 0 {
left := b[off+nsrc:]
off += copy(b[off:], dest)
off += copy(b[off:], left)
return b[:off]
}
if ndelta > 0 {
b = append(b, dest[:ndelta]...)
copy(b[off+len(dest):], b[off+nsrc:])
copy(b[off:], dest)
} else {
copy(b[off:], dest)
}
return b
}
func ReplaceOne(b []byte, from int, src, dest []byte) ([]byte, int) {
pos := bytes.Index(b[from:], src)
if pos < 0 {
return b, -1
}
from += pos
return ReplaceAt(b, from, len(src), dest), from + len(dest)
}
func Replace(b []byte, src, dest []byte, n int) []byte {
from := 0
for n != 0 {
b, from = ReplaceOne(b, from, src, dest)
if from < 0 {
break
}
n--
}
return b
}
// ---------------------------------------------------

View File

@@ -1,54 +0,0 @@
package bytes
import (
"strings"
"testing"
)
type replaceCase struct {
s string
src string
dest string
n int
}
func stringReplace(b string, src, dest string, n int) string {
return string(Replace([]byte(b), []byte(src), []byte(dest), n))
}
func TestReplace(t *testing.T) {
cases := []replaceCase{
{"hello, world!", "world", "xsw", -1},
{"hello, world world world", "world", "xsw", 1},
{"hello, world world world", "world", "xsw", 2},
{"hello, world world world", "world", "xsw", -1},
{"hello, xsw!", "xsw", "world", -1},
{"hello, xsw xsw xsw", "xsw", "world", 1},
{"hello, xsw xsw xsw", "xsw", "world", 2},
{"hello, xsw xsw xsw", "xsw", "world", -1},
}
for _, c := range cases {
ret := stringReplace(c.s, c.src, c.dest, c.n)
expected := strings.Replace(c.s, c.src, c.dest, c.n)
if ret != expected {
t.Fatal("Replace failed:", c, "ret:", ret, "expected:", expected)
}
}
}
func stringInsertAt(b string, off int, text string) string {
return string(ReplaceAt([]byte(b), off, 0, []byte(text)))
}
func TestInsertAt(t *testing.T) {
ret := stringInsertAt("helloworld", 5, ", ")
if ret != "hello, world" {
t.Fatal("InsertAt failed:", ret)
}
}

View File

@@ -1,63 +0,0 @@
// Package seekable provides a way to read an http.Request's body and replace it so that it can be read again.
package seekable
import (
"errors"
"io"
"io/ioutil"
"net/http"
"qiniupkg.com/x/bytes.v7"
)
// ---------------------------------------------------
type Seekabler interface {
Bytes() []byte
Read(val []byte) (n int, err error)
SeekToBegin() error
}
type SeekableCloser interface {
Seekabler
io.Closer
}
// ---------------------------------------------------
type readCloser struct {
Seekabler
io.Closer
}
var ErrNoBody = errors.New("no body")
func New(req *http.Request) (r SeekableCloser, err error) {
if req.Body == nil {
return nil, ErrNoBody
}
var ok bool
if r, ok = req.Body.(SeekableCloser); ok {
return
}
b, err2 := ReadAll(req)
if err2 != nil {
return nil, err2
}
r = bytes.NewReader(b)
req.Body = readCloser{r, req.Body}
return
}
func ReadAll(req *http.Request) (b []byte, err error) {
if req.ContentLength > 0 {
b = make([]byte, int(req.ContentLength))
_, err = io.ReadFull(req.Body, b)
return
} else if req.ContentLength == 0 {
return nil, ErrNoBody
}
return ioutil.ReadAll(req.Body)
}
// ---------------------------------------------------
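A small sketch of what this wrapper is for: buffering req.Body so one layer (for example request-signing code) can inspect the full body while later consumers can still read it. The handler-less setup below is illustrative; imports follow the qiniupkg.com/x layout of these vendored sources.
```
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"

	"qiniupkg.com/x/bytes.v7/seekable"
)

func main() {
	req, _ := http.NewRequest("POST", "http://example.com/a", strings.NewReader("a=1"))
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")

	s, err := seekable.New(req) // buffers the body and swaps req.Body
	if err != nil {
		panic(err)
	}
	fmt.Printf("bytes available for signing: %q\n", s.Bytes())

	// The body can still be consumed afterwards, e.g. by ParseForm.
	b, _ := ioutil.ReadAll(req.Body)
	fmt.Printf("body read again: %q\n", b)
}
```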

View File

@@ -1,43 +0,0 @@
package seekable
import (
"bytes"
"net/http"
"testing"
"github.com/stretchr/testify/assert"
)
func TestSeekable_EOFIfReqAlreadyParsed(t *testing.T) {
body := "a=1"
req, err := http.NewRequest("POST", "/a", bytes.NewBufferString(body))
assert.NoError(t, err)
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
req.Header.Set("Content-Length", "3")
req.ParseForm()
_, err = New(req)
assert.Equal(t, err.Error(), "EOF")
}
func TestSeekable_WorkaroundForEOF(t *testing.T) {
body := "a=1"
req, err := http.NewRequest("POST", "/a", bytes.NewBufferString(body))
assert.NoError(t, err)
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
req.Header.Set("Content-Length", "3")
_, _ = New(req)
req.ParseForm()
assert.Equal(t, req.FormValue("a"), "1")
_, err = New(req)
assert.NoError(t, err)
}
func TestSeekable(t *testing.T) {
body := "a=1"
req, err := http.NewRequest("POST", "/a", bytes.NewBufferString(body))
assert.NoError(t, err)
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
req.Header.Set("Content-Length", "3")
_, err = New(req)
assert.NoError(t, err)
}

View File

@@ -1,264 +0,0 @@
package cmdline
import (
"errors"
"strings"
. "qiniupkg.com/x/ctype.v7"
)
/* ---------------------------------------------------------------------------
Basic shell rules:
* Multi-line strings: use '...' or "...". Inside "..." the $(var) form is expanded automatically; inside '...' it is not.
* Plain strings: separated by [ \t]. Escape sequences start with \.
* External commands: `...`.
Qiniu rules:
* External commands: `...` or |...|.
* Multi-line strings: '...' or ```\n...``` or ===\n...=== are taken literally (no escaping). "..." supports escapes starting with \ as well as external commands.
* Plain strings: separated by [ \t]. Escape sequences start with \; external commands are also supported.
* $(var) support: each command performs its own $(var) expansion. Expansion is not centralized because different contexts need different escaping.
Examples:
post http://rs.qiniu.com/delete/`base64 Bucket:Key`
auth `qbox AccessKey SecretKey`
ret 200
post http://rs.qiniu.com/batch
auth qboxtest
form op=/delete/`base64 Bucket:Key`&op=/delete/`base64 Bucket2:Key2`
ret 200
post http://rs.qiniu.com/batch
auth qboxtest
form op=/delete/`base64 Bucket:Key`&op=/delete/`base64 Bucket:NotExistKey`
ret 298
json '[
{"code": 200}, {"code": 612}
]'
equal $(code1) 200
// -------------------------------------------------------------------------*/
var (
EOF = errors.New("end of file")
ErrUnsupportedFeatureSubCmd = errors.New("unsupported feature: sub command")
ErrUnsupportedFeatureMultiCmds = errors.New("unsupported feature: multi commands")
ErrInvalidEscapeChar = errors.New("invalid escape char")
ErrIncompleteStringExpectQuot = errors.New("incomplete string, expect \"")
ErrIncompleteStringExpectSquot = errors.New("incomplete string, expect '")
ErrIncompleteStringExpectBacktick = errors.New("incomplete string, expect ` or |")
)
var (
errEOL = errors.New("end of line")
)
// ---------------------------------------------------------------------------
func Skip(str string, typeMask uint32) string {
for i := 0; i < len(str); i++ {
if !Is(typeMask, rune(str[i])) {
return str[i:]
}
}
return ""
}
func Find(str string, typeMask uint32) (n int) {
for n = 0; n < len(str); n++ {
if Is(typeMask, rune(str[n])) {
break
}
}
return
}
// ---------------------------------------------------------------------------
// EOL = \r\n? | \n
//
func requireEOL(code string) (hasEOL bool, codeNext string) {
if strings.HasPrefix(code, "\r") {
if strings.HasPrefix(code[1:], "\n") {
return true, code[2:]
}
} else if !strings.HasPrefix(code, "\n") {
return false, code
}
return true, code[1:]
}
// ---------------------------------------------------------------------------
type Parser struct {
ExecSub func(code string) (string, error)
Escape func(c byte) string
comment bool
}
func NewParser() *Parser {
return &Parser{
ExecSub: defaultExecSub,
Escape: defaultEscape,
}
}
func defaultExecSub(code string) (string, error) {
return "", ErrUnsupportedFeatureSubCmd
}
// ---------------------------------------------------------------------------
const (
endOfLine = EOL | SEMICOLON // [\r\n;]
blanks = SPACE_BAR | TAB
blankAndEOLs = SPACE_BAR | TAB | endOfLine
)
const (
endMask_QuotString = RDIV | BACKTICK | OR | QUOT // [\\`|"]
endMask_NonquotString = RDIV | BACKTICK | OR | blankAndEOLs // [\\`| \t\r\n;]
)
func (p *Parser) parseString(
code string, endMask uint32) (item string, ok bool, codeNext string, err error) {
codeNext = code
for {
n := Find(codeNext, endMask)
if n > 0 {
item += codeNext[:n]
ok = true
}
if len(codeNext) == n {
codeNext = ""
if endMask == endMask_QuotString {
err = ErrIncompleteStringExpectQuot
} else {
err = EOF
}
return
}
switch codeNext[n] {
case '\\':
if len(codeNext) == n+1 {
err = ErrInvalidEscapeChar
return
}
item += p.Escape(codeNext[n+1])
codeNext = codeNext[n+2:]
case '`', '|':
c := codeNext[n]
codeNext = codeNext[n+1:]
len := strings.IndexByte(codeNext, c)
if len < 0 {
err = ErrIncompleteStringExpectBacktick
return
}
if !p.comment {
valSub, errSub := p.ExecSub(codeNext[:len])
if errSub != nil {
err = errors.New("Exec `" + codeNext[:len] + "` failed: " + errSub.Error())
return
}
item += valSub
}
codeNext = codeNext[len+1:]
case '"':
ok = true
codeNext = codeNext[n+1:]
return
default:
if Is(endOfLine, rune(codeNext[n])) {
err = errEOL
}
codeNext = codeNext[n+1:]
return
}
ok = true
}
return
}
func (p *Parser) parseItem(
code string, skipMask uint32) (item string, ok bool, codeNext string, err error) {
codeNext = Skip(code, skipMask)
if len(codeNext) == 0 {
err = EOF
return
}
switch codeNext[0] {
case '"':
return p.parseString(codeNext[1:], endMask_QuotString)
case '\'':
codeNext = codeNext[1:]
len := strings.IndexByte(codeNext, '\'')
if len < 0 {
err = ErrIncompleteStringExpectSquot
return
}
return codeNext[:len], true, codeNext[len+1:], nil
default:
if strings.HasPrefix(codeNext, "```") || strings.HasPrefix(codeNext, "===") {
endMark := codeNext[:3]
_, codeNext = requireEOL(codeNext[3:])
len := strings.Index(codeNext, endMark)
if len < 0 {
err = errors.New("incomplete string, expect " + endMark)
return
}
return codeNext[:len], true, codeNext[len+3:], nil
}
return p.parseString(codeNext, endMask_NonquotString)
}
}
func (p *Parser) ParseCmd(cmdline string) (cmd []string, err error) {
cmd, _, err = p.ParseCode(cmdline)
if err == EOF && len(cmd) > 0 {
return cmd, nil
}
if err == nil {
err = ErrUnsupportedFeatureMultiCmds
}
return
}
func (p *Parser) ParseCode(code string) (cmd []string, codeNext string, err error) {
item, ok, codeNext, err := p.parseItem(code, blankAndEOLs)
if !ok {
return
}
p.comment = strings.HasPrefix(item, "#")
cmd = append(cmd, item)
for err == nil {
item, ok, codeNext, err = p.parseItem(codeNext, blanks)
if ok {
cmd = append(cmd, item)
}
}
if err == errEOL {
err = nil
}
if p.comment {
cmd = nil
}
return
}
// ---------------------------------------------------------------------------
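Driving the parser above is straightforward; a minimal sketch, where ExecSub is a stand-in that only marks sub-commands rather than a real `base64`/`qbox` runner:
```
package main

import (
	"fmt"

	"qiniupkg.com/x/cmdline.v7"
)

func main() {
	p := cmdline.NewParser()
	p.ExecSub = func(code string) (string, error) {
		return "<" + code + ">", nil // pretend to run the external command
	}

	cmd, err := p.ParseCmd("post http://rs.qiniu.com/delete/`base64 Bucket:Key`")
	if err != nil {
		panic(err)
	}
	fmt.Println(cmd) // [post http://rs.qiniu.com/delete/<base64 Bucket:Key>]
}
```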

View File

@@ -1,173 +0,0 @@
package cmdline
import (
"reflect"
"testing"
)
// ---------------------------------------------------------------------------
func equalErr(err error, errExp interface{}) bool {
if err == nil || errExp == nil {
return err == nil && errExp == nil
}
return err.Error() == errExp.(string)
}
// ---------------------------------------------------------------------------
func TestComment(t *testing.T) {
execSub := false
ctx := Parser{
ExecSub: func(code string) (string, error) {
execSub = true
return "[" + code + "]", nil
},
Escape: func(c byte) string {
return string(c)
},
}
cmd, codeNext, err := ctx.ParseCode("#abc `calc $(a)+$(b)`")
if err != EOF || codeNext != "" {
t.Fatal("ParseCode: eof is expected")
}
if execSub {
t.Fatal("don't execSub")
}
if len(cmd) != 0 {
t.Fatal("len(cmd) != 0")
}
}
// ---------------------------------------------------------------------------
type caseParse struct {
code string
cmd []string
codeNext string
err interface{}
}
func TestParse(t *testing.T) {
cases := []caseParse{
{
code: ";b",
cmd: []string{"b"},
codeNext: "",
err: "end of file",
},
{
code: ";b;abc",
cmd: []string{"b"},
codeNext: "abc",
err: nil,
},
{
code: "a`b`\\c",
cmd: []string{"a[b]c"},
codeNext: "",
err: "end of file",
},
{
code: "a`b`c 'c\\n`123`' \"c\\n\"",
cmd: []string{"a[b]c", "c\\n`123`", "cn"},
codeNext: "",
err: "end of file",
},
{
code: "auth qboxtest 'mac AccessKey SecretKey'",
cmd: []string{"auth", "qboxtest", "mac AccessKey SecretKey"},
codeNext: "",
err: "end of file",
},
{
code: "post http://rs.qiniu.com/delete/`base64 Bucket:Key`",
cmd: []string{"post", "http://rs.qiniu.com/delete/[base64 Bucket:Key]"},
codeNext: "",
err: "end of file",
},
{
code: "post http://rs.qiniu.com/delete `base64 Bucket:Key`",
cmd: []string{"post", "http://rs.qiniu.com/delete", "[base64 Bucket:Key]"},
codeNext: "",
err: "end of file",
},
{
code: "post http://rs.qiniu.com/delete/|base64 Bucket:Key|",
cmd: []string{"post", "http://rs.qiniu.com/delete/[base64 Bucket:Key]"},
codeNext: "",
err: "end of file",
},
{
code: `json '[
{"code": 200}, {"code": 612}
]'`,
cmd: []string{"json", `[
{"code": 200}, {"code": 612}
]`},
codeNext: "",
err: "end of file",
},
{
code: "auth qboxtest ```\nmac AccessKey SecretKey```",
cmd: []string{"auth", "qboxtest", "mac AccessKey SecretKey"},
codeNext: "",
err: "end of file",
},
{
code: "auth qboxtest ===\nmac AccessKey SecretKey```",
cmd: []string{"auth", "qboxtest"},
codeNext: "mac AccessKey SecretKey```",
err: "incomplete string, expect ===",
},
{
code: "auth qboxtest ===\rmac AccessKey SecretKey===",
cmd: []string{"auth", "qboxtest", "mac AccessKey SecretKey"},
codeNext: "",
err: "end of file",
},
{
code: "auth qboxtest ===\n\rmac AccessKey SecretKey===",
cmd: []string{"auth", "qboxtest", "\rmac AccessKey SecretKey"},
codeNext: "",
err: "end of file",
},
{
code: "auth qboxtest ===\r\n\nmac AccessKey SecretKey===",
cmd: []string{"auth", "qboxtest", "\nmac AccessKey SecretKey"},
codeNext: "",
err: "end of file",
},
{
code: "auth qboxtest ===mac AccessKey SecretKey===",
cmd: []string{"auth", "qboxtest", "mac AccessKey SecretKey"},
codeNext: "",
err: "end of file",
},
}
ctx := Parser{
ExecSub: func(code string) (string, error) {
return "[" + code + "]", nil
},
Escape: func(c byte) string {
return string(c)
},
}
for _, c := range cases {
cmd, codeNext, err := ctx.ParseCode(c.code)
if !equalErr(err, c.err) {
t.Fatal("Parse failed:", c, err)
}
if !reflect.DeepEqual(cmd, c.cmd) || codeNext != c.codeNext {
t.Fatal("Parse failed:", c, cmd, codeNext)
}
}
}
// ---------------------------------------------------------------------------

View File

@@ -1,98 +0,0 @@
package cmdline
// ---------------------------------------------------------------------------
const (
escTableBaseChar = '0'
escTableLen = ('z' - escTableBaseChar + 1)
)
var escTable = []byte{
0, // 0 [48]
49, // 1 [49]
50, // 2 [50]
51, // 3 [51]
52, // 4 [52]
53, // 5 [53]
54, // 6 [54]
55, // 7 [55]
56, // 8 [56]
57, // 9 [57]
58, // : [58]
59, // ; [59]
60, // < [60]
61, // = [61]
62, // > [62]
63, // ? [63]
64, // @ [64]
65, // A [65]
66, // B [66]
67, // C [67]
68, // D [68]
69, // E [69]
70, // F [70]
71, // G [71]
72, // H [72]
73, // I [73]
74, // J [74]
75, // K [75]
76, // L [76]
77, // M [77]
78, // N [78]
79, // O [79]
80, // P [80]
81, // Q [81]
82, // R [82]
83, // S [83]
84, // T [84]
85, // U [85]
86, // V [86]
87, // W [87]
88, // X [88]
89, // Y [89]
90, // Z [90]
91, // [ [91]
92, // \ [92]
93, // ] [93]
94, // ^ [94]
95, // _ [95]
96, // ` [96]
97, // a [97]
98, // b [98]
99, // c [99]
100, // d [100]
101, // e [101]
102, // f [102]
103, // g [103]
104, // h [104]
105, // i [105]
106, // j [106]
107, // k [107]
108, // l [108]
109, // m [109]
'\n', // n [110]
111, // o [111]
112, // p [112]
113, // q [113]
'\r', // r [114]
115, // s [115]
'\t', // t [116]
117, // u [117]
118, // v [118]
119, // w [119]
120, // x [120]
121, // y [121]
122, // z [122]
123, // { [123]
}
func defaultEscape(c byte) string {
if c - escTableBaseChar < escTableLen {
c = escTable[c - escTableBaseChar]
}
return string(c)
}
// ---------------------------------------------------------------------------

View File

@@ -1,41 +0,0 @@
package cmdline
import (
"testing"
)
// ---------------------------------------------------------------------------
func TestEscape(t *testing.T) {
for i := 0; i < escTableBaseChar; i++ {
checkEscapeChar(t, i, i)
}
table := make([]int, escTableLen)
for i := 0; i < escTableLen; i++ {
table[i] = escTableBaseChar + i
}
table['0'-escTableBaseChar] = 0
table['r'-escTableBaseChar] = '\r'
table['t'-escTableBaseChar] = '\t'
table['n'-escTableBaseChar] = '\n'
for i := 0; i < escTableLen; i++ {
checkEscapeChar(t, escTableBaseChar+i, table[i])
}
for i := int(escTableBaseChar + escTableLen); i < 256; i++ {
checkEscapeChar(t, i, i)
}
}
func checkEscapeChar(t *testing.T, i, exp int) {
ret := defaultEscape(byte(i))
if ret != string(exp) {
t.Fatal("escapeChar failed:", i)
}
}
// ---------------------------------------------------------------------------

View File

@@ -1,41 +0,0 @@
package config
import (
"errors"
"os"
)
var homeEnvNames = [][]string{
{"HOME"},
{"HOMEDRIVE", "HOMEPATH"},
}
var (
ErrHomeNotFound = errors.New("$HOME not found")
)
func getEnv(name []string) (v string) {
if len(name) == 1 {
return os.Getenv(name[0])
}
for _, k := range name {
v += os.Getenv(k)
}
return
}
func GetDir(app string) (dir string, err error) {
for _, name := range homeEnvNames {
home := getEnv(name)
if home == "" {
continue
}
dir = home + "/." + app
err = os.MkdirAll(dir, 0777)
return
}
return "", ErrHomeNotFound
}

View File

@@ -1,116 +0,0 @@
package config
import (
"bytes"
"encoding/json"
"flag"
"io/ioutil"
"qiniupkg.com/x/log.v7"
)
var (
confName *string
)
func Init(cflag, app, default_conf string) {
confDir, _ := GetDir(app)
confName = flag.String(cflag, confDir+"/"+default_conf, "the config file")
}
func GetPath() string {
if confName != nil {
return *confName
}
return ""
}
func Load(conf interface{}) (err error) {
if !flag.Parsed() {
flag.Parse()
}
log.Info("Use the config file of ", *confName)
return LoadEx(conf, *confName)
}
func LoadEx(conf interface{}, confName string) (err error) {
data, err := ioutil.ReadFile(confName)
if err != nil {
log.Error("Load conf failed:", err)
return
}
data = trimComments(data)
err = json.Unmarshal(data, conf)
if err != nil {
log.Error("Parse conf failed:", err)
}
return
}
func LoadFile(conf interface{}, confName string) (err error) {
data, err := ioutil.ReadFile(confName)
if err != nil {
return
}
data = trimComments(data)
return json.Unmarshal(data, conf)
}
func LoadBytes(conf interface{}, data []byte) (err error) {
return json.Unmarshal(trimComments(data), conf)
}
func LoadString(conf interface{}, data string) (err error) {
return json.Unmarshal(trimComments([]byte(data)), conf)
}
func trimComments(data []byte) (data1 []byte) {
var line []byte
data1 = data[:0]
for {
pos := bytes.IndexByte(data, '\n')
if pos < 0 {
line = data
} else {
line = data[:pos+1]
}
data1 = append(data1, trimCommentsLine(line)...)
if pos < 0 {
return
}
data = data[pos+1:]
}
}
func trimCommentsLine(line []byte) []byte {
n := len(line)
quoteCount := 0
for i := 0; i < n; i++ {
c := line[i]
switch c {
case '\\':
i++
case '"':
quoteCount++
case '#':
if (quoteCount&1) == 0 {
return line[:i]
}
}
}
return line
}
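The loader above accepts JSON with `#` comments, which trimComments strips before json.Unmarshal runs. A minimal sketch with an illustrative struct and inline config string instead of a real file:
```
package main

import (
	"fmt"

	"qiniupkg.com/x/config.v7"
)

type Conf struct {
	DebugLevel int    `json:"debug_level"`
	RsHost     string `json:"rs_host"`
}

func main() {
	data := `{
	"debug_level": 1,                    # debug level
	"rs_host": "http://localhost:15001"  # RS service
}`
	var conf Conf
	if err := config.LoadString(&conf, data); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", conf) // {DebugLevel:1 RsHost:http://localhost:15001}
}
```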

View File

@@ -1,59 +0,0 @@
package config_test
import (
"bytes"
"encoding/json"
"testing"
"qiniupkg.com/x/config.v7"
)
func TestTrimComments(t *testing.T) {
confData := `{
"debug_level": 0, # 调试级别
"rs_host": "http://localhost:15001", #RS服务
"limit": 5, #限制数
"retryTimes": 56,
"quote0": "###",
"quote": "quo\\\"\\#",
"ant": "ant\\#" #123
}`
confDataExp := `{
"debug_level": 0,
"rs_host": "http://localhost:15001",
"limit": 5,
"retryTimes": 56,
"quote0": "###",
"quote": "quo\\\"\\#",
"ant": "ant\\#"
}`
var (
conf, confExp interface{}
)
err := config.LoadString(&conf, confData)
if err != nil {
t.Fatal("config.LoadString(conf) failed:", err)
}
err = config.LoadString(&confExp, confDataExp)
if err != nil {
t.Fatal("config.LoadString(confExp) failed:", err)
}
b, err := json.Marshal(conf)
if err != nil {
t.Fatal("json.Marshal failed:", err)
}
bExp, err := json.Marshal(confExp)
if err != nil {
t.Fatal("json.Marshal(exp) failed:", err)
}
if !bytes.Equal(b, bExp) {
t.Fatal("b != bExp")
}
}

View File

@@ -1,237 +0,0 @@
package ctype
const (
UPPER = 0x01 /* upper case letter[A-Z] */
LOWER = 0x02 /* lower case letter[a-z] */
DIGIT = 0x04 /* digit[0-9] */
UNDERLINE = 0x08 /* underline[_] */
XDIGIT = 0x10 /* xdigit[0-9a-fA-F] */
EOL = 0x20 /* [\r\n] */
ADD = 0x40 /* [+] */
SUB = 0x80 /* [-] */
MUL = 0x100 /* [*] */
DIV = 0x200 /* [/] */
LT = 0x400 /* [<] */
GT = 0x800 /* [>] */
EQ = 0x1000 /* [=] */
RDIV = 0x2000 /* [\\], right-division, anti-slash */
DOT = 0x4000 /* [.] */
COLON = 0x8000 /* [:], colon */
PERCENT = 0x10000 /* [%] */
AND = 0x20000 /* [&] */
OR = 0x40000 /* [|] */
SPACE_BAR = 0x80000 /* [ ] */
LCAP_R = 0x100000 /* [r] */
LCAP_T = 0x200000 /* [t] */
LCAP_N = 0x400000 /* [n] */
LCAP_W = 0x800000 /* [w] */
COMMA = 0x1000000 /* [,] */
SEMICOLON = 0x2000000 /* [;] */
TAB = 0x4000000 /* [\t] */
QUOT = 0x8000000 /* ["] */
BACKTICK = 0x10000000 /* [`] */
)
const (
BLANK = SPACE_BAR
TSPACE = TAB | EOL
SPACE = SPACE_BAR | TSPACE
PATH_SEP = DIV | RDIV
ALPHA = UPPER | LOWER
SYMBOL_FIRST_CHAR = ALPHA
SYMBOL_NEXT_CHAR = SYMBOL_FIRST_CHAR | DIGIT
CSYMBOL_FIRST_CHAR = ALPHA | UNDERLINE
CSYMBOL_NEXT_CHAR = CSYMBOL_FIRST_CHAR | DIGIT
XMLSYMBOL_FIRST_CHAR = CSYMBOL_FIRST_CHAR
XMLSYMBOL_NEXT_CHAR = CSYMBOL_NEXT_CHAR | SUB
DOMAIN_CHAR = ALPHA | DIGIT | SUB | ADD | DOT
BASE64 = ALPHA | DIGIT | ADD | DIV // [a-zA-Z0-9+/]
URLSAFE_BASE64 = ALPHA | DIGIT | SUB | UNDERLINE // [a-zA-Z0-9\-_]
)
// -----------------------------------------------------------
var table = []uint32{
0, // [0]
0, // [1]
0, // [2]
0, // [3]
0, // [4]
0, // [5]
0, // [6]
0, // [7]
0, // [8]
TAB, // [9]
EOL, // [10]
0, // [11]
0, // [12]
EOL, // [13]
0, // [14]
0, // [15]
0, // [16]
0, // [17]
0, // [18]
0, // [19]
0, // [20]
0, // [21]
0, // [22]
0, // [23]
0, // [24]
0, // [25]
0, // [26]
0, // [27]
0, // [28]
0, // [29]
0, // [30]
0, // [31]
SPACE_BAR, // [32]
0, // ! [33]
QUOT, // " [34]
0, // # [35]
0, // $ [36]
PERCENT, // % [37]
AND, // & [38]
0, // ' [39]
0, // ( [40]
0, // ) [41]
MUL, // * [42]
ADD, // + [43]
COMMA, // , [44]
SUB, // - [45]
DOT, // . [46]
DIV, // / [47]
DIGIT | XDIGIT, // 0 [48]
DIGIT | XDIGIT, // 1 [49]
DIGIT | XDIGIT, // 2 [50]
DIGIT | XDIGIT, // 3 [51]
DIGIT | XDIGIT, // 4 [52]
DIGIT | XDIGIT, // 5 [53]
DIGIT | XDIGIT, // 6 [54]
DIGIT | XDIGIT, // 7 [55]
DIGIT | XDIGIT, // 8 [56]
DIGIT | XDIGIT, // 9 [57]
COLON, // : [58]
SEMICOLON, // ; [59]
LT, // < [60]
EQ, // = [61]
GT, // > [62]
0, // ? [63]
0, // @ [64]
UPPER | XDIGIT, // A [65]
UPPER | XDIGIT, // B [66]
UPPER | XDIGIT, // C [67]
UPPER | XDIGIT, // D [68]
UPPER | XDIGIT, // E [69]
UPPER | XDIGIT, // F [70]
UPPER, // G [71]
UPPER, // H [72]
UPPER, // I [73]
UPPER, // J [74]
UPPER, // K [75]
UPPER, // L [76]
UPPER, // M [77]
UPPER, // N [78]
UPPER, // O [79]
UPPER, // P [80]
UPPER, // Q [81]
UPPER, // R [82]
UPPER, // S [83]
UPPER, // T [84]
UPPER, // U [85]
UPPER, // V [86]
UPPER, // W [87]
UPPER, // X [88]
UPPER, // Y [89]
UPPER, // Z [90]
0, // [ [91]
RDIV, // \ [92]
0, // ] [93]
0, // ^ [94]
UNDERLINE, // _ [95]
BACKTICK, // ` [96]
LOWER | XDIGIT, // a [97]
LOWER | XDIGIT, // b [98]
LOWER | XDIGIT, // c [99]
LOWER | XDIGIT, // d [100]
LOWER | XDIGIT, // e [101]
LOWER | XDIGIT, // f [102]
LOWER, // g [103]
LOWER, // h [104]
LOWER, // i [105]
LOWER, // j [106]
LOWER, // k [107]
LOWER, // l [108]
LOWER, // m [109]
LCAP_N | LOWER, // n [110]
LOWER, // o [111]
LOWER, // p [112]
LOWER, // q [113]
LCAP_R | LOWER, // r [114]
LOWER, // s [115]
LCAP_T | LOWER, // t [116]
LOWER, // u [117]
LOWER, // v [118]
LCAP_W | LOWER, // w [119]
LOWER, // x [120]
LOWER, // y [121]
LOWER, // z [122]
0, // { [123]
OR, // | [124]
0, // } [125]
0, // ~ [126]
0, // del [127]
}
// -----------------------------------------------------------
func Is(typeMask uint32, c rune) bool {
if uint(c) < uint(len(table)) {
return (typeMask & table[c]) != 0
}
return false
}
func IsType(typeMask uint32, str string) bool {
if str == "" {
return false
}
for _, c := range str {
if !Is(typeMask, c) {
return false
}
}
return true
}
func IsTypeEx(typeFirst, typeNext uint32, str string) bool {
if str == "" {
return false
}
for i, c := range str {
if i > 0 {
if !Is(typeNext, c) {
return false
}
} else {
if !Is(typeFirst, c) {
return false
}
}
}
return true
}
func IsCSymbol(str string) bool {
return IsTypeEx(CSYMBOL_FIRST_CHAR, CSYMBOL_NEXT_CHAR, str)
}
func IsXmlSymbol(str string) bool {
return IsTypeEx(XMLSYMBOL_FIRST_CHAR, XMLSYMBOL_NEXT_CHAR, str)
}
// -----------------------------------------------------------
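A short sketch of the character-class helpers above (the domain and symbol strings are just examples):
```
package main

import (
	"fmt"

	"qiniupkg.com/x/ctype.v7"
)

func main() {
	fmt.Println(ctype.Is(ctype.DIGIT, '7'))                      // true
	fmt.Println(ctype.IsType(ctype.DOMAIN_CHAR, "up.qiniu.com")) // true
	fmt.Println(ctype.IsCSymbol("_bucket1"))                     // true
	fmt.Println(ctype.IsCSymbol("1bucket"))                      // false
}
```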

View File

@@ -1,72 +0,0 @@
package ctype
import (
"testing"
)
type testCase struct {
c rune
mask uint32
is bool
}
type stringTestCase struct {
str string
maskFirst uint32
maskNext uint32
is bool
}
var isCases = []testCase{
{'-', DOMAIN_CHAR, true},
{'.', DOMAIN_CHAR, true},
{'_', DOMAIN_CHAR, false},
{'+', DOMAIN_CHAR, true},
{'a', DOMAIN_CHAR, true},
{'A', DOMAIN_CHAR, true},
{'0', DOMAIN_CHAR, true},
{':', DOMAIN_CHAR, false},
{'1', ALPHA, false},
{'a', ALPHA, true},
{'A', ALPHA, true},
}
var strCases = []stringTestCase{
{"", CSYMBOL_FIRST_CHAR, CSYMBOL_NEXT_CHAR, false},
{"123", CSYMBOL_FIRST_CHAR, CSYMBOL_NEXT_CHAR, false},
{"_", CSYMBOL_FIRST_CHAR, CSYMBOL_NEXT_CHAR, true},
{"_123", CSYMBOL_FIRST_CHAR, CSYMBOL_NEXT_CHAR, true},
{"x_123", CSYMBOL_FIRST_CHAR, CSYMBOL_NEXT_CHAR, true},
{"x_", CSYMBOL_FIRST_CHAR, CSYMBOL_NEXT_CHAR, true},
{"_x", CSYMBOL_FIRST_CHAR, CSYMBOL_NEXT_CHAR, true},
{"", CSYMBOL_FIRST_CHAR, CSYMBOL_FIRST_CHAR, false},
{"x_123", CSYMBOL_FIRST_CHAR, CSYMBOL_FIRST_CHAR, false},
{"x_", CSYMBOL_FIRST_CHAR, CSYMBOL_FIRST_CHAR, true},
{"_x", CSYMBOL_FIRST_CHAR, CSYMBOL_FIRST_CHAR, true},
{"_", CSYMBOL_FIRST_CHAR, CSYMBOL_FIRST_CHAR, true},
}
func TestIs(t *testing.T) {
for _, a := range isCases {
f := Is(a.mask, a.c)
if f != a.is {
t.Fatal("case:", a, "result:", f)
}
}
}
func TestIsTypeEx(t *testing.T) {
for _, a := range strCases {
f := IsTypeEx(a.maskFirst, a.maskNext, a.str)
if f != a.is {
t.Fatal("case:", a, "result:", f)
}
if a.maskFirst == a.maskNext {
f = IsType(a.maskFirst, a.str)
if f != a.is {
t.Fatal("case:", a, "result:", f)
}
}
}
}

vendor/github.com/qiniu/x/doc.go generated vendored
View File

@@ -1,9 +0,0 @@
package x
import (
_ "qiniupkg.com/x/bytes.v7"
_ "qiniupkg.com/x/ctype.v7"
_ "qiniupkg.com/x/rpc.v7"
_ "qiniupkg.com/x/url.v7"
)

View File

@@ -1,146 +0,0 @@
package errors
import (
"errors"
"fmt"
"runtime"
"strconv"
"strings"
)
const (
prefix = " ==> "
)
// --------------------------------------------------------------------
func New(msg string) error {
return errors.New(msg)
}
// --------------------------------------------------------------------
type appendDetailer interface {
AppendErrorDetail(b []byte) []byte
}
func appendErrorDetail(b []byte, err error) []byte {
if e, ok := err.(appendDetailer); ok {
return e.AppendErrorDetail(b)
}
b = append(b, prefix...)
return append(b, err.Error()...)
}
// --------------------------------------------------------------------
type errorDetailer interface {
ErrorDetail() string
}
func Detail(err error) string {
if e, ok := err.(errorDetailer); ok {
return e.ErrorDetail()
}
return err.Error()
}
// --------------------------------------------------------------------
type summaryErr interface {
SummaryErr() error
}
func Err(err error) error {
if e, ok := err.(summaryErr); ok {
return e.SummaryErr()
}
return err
}
// --------------------------------------------------------------------
type ErrorInfo struct {
err error
why error
cmd []interface{}
pc uintptr
}
func shortFile(file string) string {
pos := strings.LastIndex(file, "/src/")
if pos != -1 {
return file[pos+5:]
}
return file
}
func Info(err error, cmd ...interface{}) *ErrorInfo {
pc, _, _, ok := runtime.Caller(1)
if !ok {
pc = 0
}
return &ErrorInfo{cmd: cmd, err: Err(err), pc: pc}
}
func InfoEx(calldepth int, err error, cmd ...interface{}) *ErrorInfo {
pc, _, _, ok := runtime.Caller(calldepth+1)
if !ok {
pc = 0
}
return &ErrorInfo{cmd: cmd, err: Err(err), pc: pc}
}
func (r *ErrorInfo) Detail(err error) *ErrorInfo {
r.why = err
return r
}
func (r *ErrorInfo) NestedObject() interface{} {
return r.err
}
func (r *ErrorInfo) SummaryErr() error {
return r.err
}
func (r *ErrorInfo) Error() string {
return r.err.Error()
}
func (r *ErrorInfo) ErrorDetail() string {
b := make([]byte, 1, 64)
b[0] = '\n'
b = r.AppendErrorDetail(b)
return string(b)
}
func (r *ErrorInfo) AppendErrorDetail(b []byte) []byte {
b = append(b, prefix...)
if r.pc != 0 {
f := runtime.FuncForPC(r.pc)
if f != nil {
file, line := f.FileLine(r.pc)
b = append(b, shortFile(file)...)
b = append(b, ':')
b = append(b, strconv.Itoa(line)...)
b = append(b, ':', ' ')
fnName := f.Name()
fnName = fnName[strings.LastIndex(fnName, "/")+1:]
fnName = fnName[strings.Index(fnName, ".")+1:]
b = append(b, '[')
b = append(b, fnName...)
b = append(b, ']', ' ')
}
}
b = append(b, Detail(r.err)...)
b = append(b, ' ', '~', ' ')
b = append(b, fmt.Sprintln(r.cmd...)...)
if r.why != nil {
b = appendErrorDetail(b, r.why)
}
return b
}
// --------------------------------------------------------------------
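A minimal sketch of how ErrorInfo is meant to be used: wrap a summary error at the call site, attach the underlying cause, and print the detailed chain when debugging. The function name and messages are illustrative.
```
package main

import (
	"fmt"

	"qiniupkg.com/x/errors.v7"
)

func loadUser(id string) error {
	cause := errors.New("record not found")
	// summary error + the failing command's arguments + detailed cause
	return errors.Info(errors.New("load user failed"), "loadUser", id).Detail(cause)
}

func main() {
	err := loadUser("u123")
	fmt.Println(err)                // load user failed
	fmt.Println(errors.Detail(err)) // file:line, the arguments, and the nested cause
}
```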

View File

@@ -1,30 +0,0 @@
package errors
import (
"errors"
"syscall"
"testing"
)
func MysqlError(err error, cmd ...interface{}) error {
return InfoEx(1, syscall.EINVAL, cmd...).Detail(err)
}
func (r *ErrorInfo) makeError() error {
err := errors.New("detail error")
return MysqlError(err, "do sth failed")
}
func TestErrorsInfo(t *testing.T) {
err := new(ErrorInfo).makeError()
msg := Detail(err)
if msg != `
==> qiniupkg.com/x/errors.v7/error_info_test.go:17: [(*ErrorInfo).makeError] invalid argument ~ do sth failed
==> detail error` {
t.Fatal("TestErrorsInfo failed")
}
}

View File

@@ -1,19 +0,0 @@
package jsonutil
import (
"encoding/json"
"reflect"
"unsafe"
)
// ----------------------------------------------------------
func Unmarshal(data string, v interface{}) error {
sh := *(*reflect.StringHeader)(unsafe.Pointer(&data))
arr := (*[1<<30]byte)(unsafe.Pointer(sh.Data))
return json.Unmarshal(arr[:sh.Len], v)
}
// ----------------------------------------------------------

View File

@@ -1,19 +0,0 @@
package jsonutil
import (
"testing"
)
func Test(t *testing.T) {
var ret struct {
Id string `json:"id"`
}
err := Unmarshal(`{"id": "123"}`, &ret)
if err != nil {
t.Fatal("Unmarshal failed:", err)
}
if ret.Id != "123" {
t.Fatal("Unmarshal incorrect:", ret.Id)
}
}

View File

@@ -1,4 +0,0 @@
log
===
Extension module for Go logging

View File

@@ -1,521 +0,0 @@
package log
import (
"bytes"
"fmt"
"io"
"os"
"runtime"
"strings"
"sync"
"time"
)
// These flags define which text to prefix to each log entry generated by the Logger.
const (
// Bits or'ed together to control what's printed. There is no control over the
// order they appear (the order listed here) or the format they present (as
// described in the comments). A colon appears after these items:
// 2009/01/23 01:23:23.123123 /a/b/c/d.go:23: message
Ldate = 1 << iota // the date: 2009/01/23
Ltime // the time: 01:23:23
Lmicroseconds // microsecond resolution: 01:23:23.123123. assumes Ltime.
Llongfile // full file name and line number: /a/b/c/d.go:23
Lshortfile // final file name element and line number: d.go:23. overrides Llongfile
Lmodule // module name
Llevel // level: 0(Debug), 1(Info), 2(Warn), 3(Error), 4(Panic), 5(Fatal)
LstdFlags = Ldate | Ltime | Lmicroseconds // initial values for the standard logger
Ldefault = Lmodule | Llevel | Lshortfile | LstdFlags
) // [prefix][time][level][module][shortfile|longfile]
const (
Ldebug = iota
Linfo
Lwarn
Lerror
Lpanic
Lfatal
)
var levels = []string{
"[DEBUG]",
"[INFO]",
"[WARN]",
"[ERROR]",
"[PANIC]",
"[FATAL]",
}
// A Logger represents an active logging object that generates lines of
// output to an io.Writer. Each logging operation makes a single call to
// the Writer's Write method. A Logger can be used simultaneously from
// multiple goroutines; it guarantees to serialize access to the Writer.
type Logger struct {
mu sync.Mutex // ensures atomic writes; protects the following fields
prefix string // prefix to write at beginning of each line
flag int // properties
Level int // debug level
out io.Writer // destination for output
buf bytes.Buffer // for accumulating text to write
levelStats [6]int64
}
// New creates a new Logger.
// The out variable sets the destination to which log data will be written.
// The prefix appears at the beginning of each generated log line.
// The flag argument defines the logging properties.
func New(out io.Writer, prefix string, flag int) *Logger {
return &Logger{out: out, prefix: prefix, Level: 1, flag: flag}
}
var Std = New(os.Stderr, "", Ldefault)
// Cheap integer to fixed-width decimal ASCII. Give a negative width to avoid zero-padding.
// Knows the buffer has capacity.
func itoa(buf *bytes.Buffer, i int, wid int) {
var u uint = uint(i)
if u == 0 && wid <= 1 {
buf.WriteByte('0')
return
}
// Assemble decimal in reverse order.
var b [32]byte
bp := len(b)
for ; u > 0 || wid > 0; u /= 10 {
bp--
wid--
b[bp] = byte(u%10) + '0'
}
// avoid slicing b to avoid an allocation.
for bp < len(b) {
buf.WriteByte(b[bp])
bp++
}
}
func shortFile(file string, flag int) string {
sep := "/"
if (flag & Lmodule) != 0 {
sep = "/src/"
}
pos := strings.LastIndex(file, sep)
if pos != -1 {
return file[pos+len(sep):]
}
return file
}
func (l *Logger) formatHeader(buf *bytes.Buffer, t time.Time, file string, line int, lvl int, reqId string) {
if l.prefix != "" {
buf.WriteString(l.prefix)
}
if l.flag&(Ldate|Ltime|Lmicroseconds) != 0 {
if l.flag&Ldate != 0 {
year, month, day := t.Date()
itoa(buf, year, 4)
buf.WriteByte('/')
itoa(buf, int(month), 2)
buf.WriteByte('/')
itoa(buf, day, 2)
buf.WriteByte(' ')
}
if l.flag&(Ltime|Lmicroseconds) != 0 {
hour, min, sec := t.Clock()
itoa(buf, hour, 2)
buf.WriteByte(':')
itoa(buf, min, 2)
buf.WriteByte(':')
itoa(buf, sec, 2)
if l.flag&Lmicroseconds != 0 {
buf.WriteByte('.')
itoa(buf, t.Nanosecond()/1e3, 6)
}
buf.WriteByte(' ')
}
}
if reqId != "" {
buf.WriteByte('[')
buf.WriteString(reqId)
buf.WriteByte(']')
}
if l.flag&Llevel != 0 {
buf.WriteString(levels[lvl])
}
if l.flag&(Lshortfile|Llongfile) != 0 {
if l.flag&Lshortfile != 0 {
file = shortFile(file, l.flag)
}
buf.WriteByte(' ')
buf.WriteString(file)
buf.WriteByte(':')
itoa(buf, line, -1)
buf.WriteString(": ")
}
}
// Output writes the output for a logging event. The string s contains
// the text to print after the prefix specified by the flags of the
// Logger. A newline is appended if the last character of s is not
// already a newline. Calldepth is used to recover the PC and is
// provided for generality, although at the moment on all pre-defined
// paths it will be 2.
func (l *Logger) Output(reqId string, lvl int, calldepth int, s string) error {
if lvl < l.Level {
return nil
}
now := time.Now() // get this early.
var file string
var line int
l.mu.Lock()
defer l.mu.Unlock()
if l.flag&(Lshortfile|Llongfile|Lmodule) != 0 {
// release lock while getting caller info - it's expensive.
l.mu.Unlock()
var ok bool
_, file, line, ok = runtime.Caller(calldepth)
if !ok {
file = "???"
line = 0
}
l.mu.Lock()
}
l.levelStats[lvl]++
l.buf.Reset()
l.formatHeader(&l.buf, now, file, line, lvl, reqId)
l.buf.WriteString(s)
if len(s) > 0 && s[len(s)-1] != '\n' {
l.buf.WriteByte('\n')
}
_, err := l.out.Write(l.buf.Bytes())
return err
}
// -----------------------------------------
// Printf calls l.Output to print to the logger.
// Arguments are handled in the manner of fmt.Printf.
func (l *Logger) Printf(format string, v ...interface{}) {
l.Output("", Linfo, 2, fmt.Sprintf(format, v...))
}
// Print calls l.Output to print to the logger.
// Arguments are handled in the manner of fmt.Print.
func (l *Logger) Print(v ...interface{}) { l.Output("", Linfo, 2, fmt.Sprint(v...)) }
// Println calls l.Output to print to the logger.
// Arguments are handled in the manner of fmt.Println.
func (l *Logger) Println(v ...interface{}) { l.Output("", Linfo, 2, fmt.Sprintln(v...)) }
// -----------------------------------------
func (l *Logger) Debugf(format string, v ...interface{}) {
if Ldebug < l.Level {
return
}
l.Output("", Ldebug, 2, fmt.Sprintf(format, v...))
}
func (l *Logger) Debug(v ...interface{}) {
if Ldebug < l.Level {
return
}
l.Output("", Ldebug, 2, fmt.Sprintln(v...))
}
// -----------------------------------------
func (l *Logger) Infof(format string, v ...interface{}) {
if Linfo < l.Level {
return
}
l.Output("", Linfo, 2, fmt.Sprintf(format, v...))
}
func (l *Logger) Info(v ...interface{}) {
if Linfo < l.Level {
return
}
l.Output("", Linfo, 2, fmt.Sprintln(v...))
}
// -----------------------------------------
func (l *Logger) Warnf(format string, v ...interface{}) {
l.Output("", Lwarn, 2, fmt.Sprintf(format, v...))
}
func (l *Logger) Warn(v ...interface{}) { l.Output("", Lwarn, 2, fmt.Sprintln(v...)) }
// -----------------------------------------
func (l *Logger) Errorf(format string, v ...interface{}) {
l.Output("", Lerror, 2, fmt.Sprintf(format, v...))
}
func (l *Logger) Error(v ...interface{}) { l.Output("", Lerror, 2, fmt.Sprintln(v...)) }
// -----------------------------------------
func (l *Logger) Fatal(v ...interface{}) {
l.Output("", Lfatal, 2, fmt.Sprint(v...))
os.Exit(1)
}
// Fatalf is equivalent to l.Printf() followed by a call to os.Exit(1).
func (l *Logger) Fatalf(format string, v ...interface{}) {
l.Output("", Lfatal, 2, fmt.Sprintf(format, v...))
os.Exit(1)
}
// Fatalln is equivalent to l.Println() followed by a call to os.Exit(1).
func (l *Logger) Fatalln(v ...interface{}) {
l.Output("", Lfatal, 2, fmt.Sprintln(v...))
os.Exit(1)
}
// -----------------------------------------
// Panic is equivalent to l.Print() followed by a call to panic().
func (l *Logger) Panic(v ...interface{}) {
s := fmt.Sprint(v...)
l.Output("", Lpanic, 2, s)
panic(s)
}
// Panicf is equivalent to l.Printf() followed by a call to panic().
func (l *Logger) Panicf(format string, v ...interface{}) {
s := fmt.Sprintf(format, v...)
l.Output("", Lpanic, 2, s)
panic(s)
}
// Panicln is equivalent to l.Println() followed by a call to panic().
func (l *Logger) Panicln(v ...interface{}) {
s := fmt.Sprintln(v...)
l.Output("", Lpanic, 2, s)
panic(s)
}
// -----------------------------------------
func (l *Logger) Stack(v ...interface{}) {
s := fmt.Sprint(v...)
s += "\n"
buf := make([]byte, 1024*1024)
n := runtime.Stack(buf, true)
s += string(buf[:n])
s += "\n"
l.Output("", Lerror, 2, s)
}
func (l *Logger) SingleStack(v ...interface{}) {
s := fmt.Sprint(v...)
s += "\n"
buf := make([]byte, 1024*1024)
n := runtime.Stack(buf, false)
s += string(buf[:n])
s += "\n"
l.Output("", Lerror, 2, s)
}
// -----------------------------------------
func (l *Logger) Stat() (stats []int64) {
l.mu.Lock()
v := l.levelStats
l.mu.Unlock()
return v[:]
}
// Flags returns the output flags for the logger.
func (l *Logger) Flags() int {
l.mu.Lock()
defer l.mu.Unlock()
return l.flag
}
// SetFlags sets the output flags for the logger.
func (l *Logger) SetFlags(flag int) {
l.mu.Lock()
defer l.mu.Unlock()
l.flag = flag
}
// Prefix returns the output prefix for the logger.
func (l *Logger) Prefix() string {
l.mu.Lock()
defer l.mu.Unlock()
return l.prefix
}
// SetPrefix sets the output prefix for the logger.
func (l *Logger) SetPrefix(prefix string) {
l.mu.Lock()
defer l.mu.Unlock()
l.prefix = prefix
}
// SetOutputLevel sets the output level for the logger.
func (l *Logger) SetOutputLevel(lvl int) {
l.mu.Lock()
defer l.mu.Unlock()
l.Level = lvl
}
// SetOutput sets the output destination for the standard logger.
func SetOutput(w io.Writer) {
Std.mu.Lock()
defer Std.mu.Unlock()
Std.out = w
}
// Flags returns the output flags for the standard logger.
func Flags() int {
return Std.Flags()
}
// SetFlags sets the output flags for the standard logger.
func SetFlags(flag int) {
Std.SetFlags(flag)
}
// Prefix returns the output prefix for the standard logger.
func Prefix() string {
return Std.Prefix()
}
// SetPrefix sets the output prefix for the standard logger.
func SetPrefix(prefix string) {
Std.SetPrefix(prefix)
}
func SetOutputLevel(lvl int) {
Std.SetOutputLevel(lvl)
}
func GetOutputLevel() int {
return Std.Level
}
// -----------------------------------------
// Print calls Output to print to the standard logger.
// Arguments are handled in the manner of fmt.Print.
func Print(v ...interface{}) {
Std.Output("", Linfo, 2, fmt.Sprint(v...))
}
// Printf calls Output to print to the standard logger.
// Arguments are handled in the manner of fmt.Printf.
func Printf(format string, v ...interface{}) {
Std.Output("", Linfo, 2, fmt.Sprintf(format, v...))
}
// Println calls Output to print to the standard logger.
// Arguments are handled in the manner of fmt.Println.
func Println(v ...interface{}) {
Std.Output("", Linfo, 2, fmt.Sprintln(v...))
}
// -----------------------------------------
func Debugf(format string, v ...interface{}) {
if Ldebug < Std.Level {
return
}
Std.Output("", Ldebug, 2, fmt.Sprintf(format, v...))
}
func Debug(v ...interface{}) {
if Ldebug < Std.Level {
return
}
Std.Output("", Ldebug, 2, fmt.Sprintln(v...))
}
// -----------------------------------------
func Infof(format string, v ...interface{}) {
if Linfo < Std.Level {
return
}
Std.Output("", Linfo, 2, fmt.Sprintf(format, v...))
}
func Info(v ...interface{}) {
if Linfo < Std.Level {
return
}
Std.Output("", Linfo, 2, fmt.Sprintln(v...))
}
// -----------------------------------------
func Warnf(format string, v ...interface{}) {
Std.Output("", Lwarn, 2, fmt.Sprintf(format, v...))
}
func Warn(v ...interface{}) { Std.Output("", Lwarn, 2, fmt.Sprintln(v...)) }
// -----------------------------------------
func Errorf(format string, v ...interface{}) {
Std.Output("", Lerror, 2, fmt.Sprintf(format, v...))
}
func Error(v ...interface{}) { Std.Output("", Lerror, 2, fmt.Sprintln(v...)) }
// -----------------------------------------
// Fatal is equivalent to Print() followed by a call to os.Exit(1).
func Fatal(v ...interface{}) {
Std.Output("", Lfatal, 2, fmt.Sprint(v...))
os.Exit(1)
}
// Fatalf is equivalent to Printf() followed by a call to os.Exit(1).
func Fatalf(format string, v ...interface{}) {
Std.Output("", Lfatal, 2, fmt.Sprintf(format, v...))
os.Exit(1)
}
// Fatalln is equivalent to Println() followed by a call to os.Exit(1).
func Fatalln(v ...interface{}) {
Std.Output("", Lfatal, 2, fmt.Sprintln(v...))
os.Exit(1)
}
// -----------------------------------------
// Panic is equivalent to Print() followed by a call to panic().
func Panic(v ...interface{}) {
s := fmt.Sprint(v...)
Std.Output("", Lpanic, 2, s)
panic(s)
}
// Panicf is equivalent to Printf() followed by a call to panic().
func Panicf(format string, v ...interface{}) {
s := fmt.Sprintf(format, v...)
Std.Output("", Lpanic, 2, s)
panic(s)
}
// Panicln is equivalent to Println() followed by a call to panic().
func Panicln(v ...interface{}) {
s := fmt.Sprintln(v...)
Std.Output("", Lpanic, 2, s)
panic(s)
}
// -----------------------------------------
func Stack(v ...interface{}) {
Std.Stack(v...)
}
func SingleStack(v ...interface{}) {
Std.SingleStack(v...)
}
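A short sketch of typical use of the logger above: lower the level of the package-level Std logger so Debug* output appears, and build a dedicated Logger with its own prefix. Names and messages are illustrative.
```
package main

import (
	"os"

	"qiniupkg.com/x/log.v7"
)

func main() {
	log.SetOutputLevel(log.Ldebug) // Std defaults to Linfo; lower it to see Debug*
	log.Debugf("uploading %s", "a.jpg")
	log.Info("upload done")

	// A dedicated logger with its own prefix and the default flags
	// ([prefix][time][level][module][shortfile]).
	l := log.New(os.Stderr, "[worker] ", log.Ldefault)
	l.Warn("retrying request")
}
```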

View File

@@ -1,79 +0,0 @@
package log
import (
"bytes"
"regexp"
"testing"
"github.com/stretchr/testify/assert"
)
func TestLog(t *testing.T) {
// keep Std clean
std := Std
SetOutputLevel(Ldebug)
Debugf("Debug: foo\n")
Debug("Debug: foo")
Infof("Info: foo\n")
Info("Info: foo")
Warnf("Warn: foo\n")
Warn("Warn: foo")
Errorf("Error: foo\n")
Error("Error: foo")
SetOutputLevel(Linfo)
Debugf("Debug: foo\n")
Debug("Debug: foo")
Infof("Info: foo\n")
Info("Info: foo")
Warnf("Warn: foo\n")
Warn("Warn: foo")
Errorf("Error: foo\n")
Error("Error: foo")
Std = std
}
func TestLog_Time(t *testing.T) {
// keep Std clean
std := Std
out := bytes.Buffer{}
Std = New(&out, Std.Prefix(), Std.Flags())
assert.Equal(t, Std.Level, Linfo)
Info("test")
outStr := out.String()
assert.True(t, regexp.MustCompile(`^\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2}.\d{6}$`).MatchString(outStr[:26]))
assert.Equal(t, outStr[26:], " [INFO] qiniupkg.com/x/log.v7/logext_test.go:53: test\n")
Std = std
}
func TestLog_Level(t *testing.T) {
// keep Std clean
std := Std
out := bytes.Buffer{}
Std = New(&out, Std.Prefix(), Std.Flags())
SetOutputLevel(Lwarn)
assert.Equal(t, Std.Level, Lwarn)
Debug("test")
assert.Equal(t, out.String(), "")
Info("test")
assert.Equal(t, out.String(), "")
Warn("test")
outStr := out.String()
assert.Equal(t, outStr[26:], " [WARN] qiniupkg.com/x/log.v7/logext_test.go:74: test\n")
Std = std
}

View File

@@ -1,110 +0,0 @@
package mockhttp
import (
"errors"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"strconv"
"qiniupkg.com/x/log.v7"
)
var (
ErrServerNotFound = errors.New("server not found")
)
// --------------------------------------------------------------------
type mockServerRequestBody struct {
reader io.Reader
closeSignal bool
}
func (r *mockServerRequestBody) Read(p []byte) (int, error) {
if r.closeSignal || r.reader == nil {
return 0, io.EOF
}
return r.reader.Read(p)
}
func (r *mockServerRequestBody) Close() error {
r.closeSignal = true
if c, ok := r.reader.(io.Closer); ok {
return c.Close()
}
return nil
}
// --------------------------------------------------------------------
// type Transport
type Transport struct {
route map[string]http.Handler
}
func NewTransport() *Transport {
return &Transport{
route: make(map[string]http.Handler),
}
}
func (p *Transport) ListenAndServe(host string, h http.Handler) {
if h == nil {
h = http.DefaultServeMux
}
p.route[host] = h
}
func (p *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
h := p.route[req.URL.Host]
if h == nil {
log.Warn("Server not found:", req.Host)
return nil, ErrServerNotFound
}
cp := *req
cp.URL.Scheme = ""
cp.URL.Host = ""
cp.RemoteAddr = "127.0.0.1:8000"
cp.Body = &mockServerRequestBody{req.Body, false}
req = &cp
rw := httptest.NewRecorder()
h.ServeHTTP(rw, req)
req.Body.Close()
ctlen := int64(-1)
if v := rw.HeaderMap.Get("Content-Length"); v != "" {
ctlen, _ = strconv.ParseInt(v, 10, 64)
}
return &http.Response{
Status: "",
StatusCode: rw.Code,
Header: rw.HeaderMap,
Body: ioutil.NopCloser(rw.Body),
ContentLength: ctlen,
TransferEncoding: nil,
Close: false,
Trailer: nil,
Request: req,
}, nil
}
// --------------------------------------------------------------------
var DefaultTransport = NewTransport()
var DefaultClient = &http.Client{Transport: DefaultTransport}
func ListenAndServe(host string, h http.Handler) {
DefaultTransport.ListenAndServe(host, h)
}
// --------------------------------------------------------------------

View File

@@ -1,114 +0,0 @@
package mockhttp_test
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"strconv"
"strings"
"testing"
"qiniupkg.com/x/mockhttp.v7"
"qiniupkg.com/x/rpc.v7"
)
// --------------------------------------------------------------------
func reply(w http.ResponseWriter, code int, data interface{}) {
msg, _ := json.Marshal(data)
h := w.Header()
h.Set("Content-Length", strconv.Itoa(len(msg)))
h.Set("Content-Type", "application/json")
w.WriteHeader(code)
w.Write(msg)
}
// --------------------------------------------------------------------
type FooRet struct {
A int `json:"a"`
B string `json:"b"`
C string `json:"c"`
}
type HandleRet map[string]string
type FooServer struct{}
func (p *FooServer) foo(w http.ResponseWriter, req *http.Request) {
reply(w, 200, &FooRet{1, req.Host, req.URL.Path})
}
func (p *FooServer) handle(w http.ResponseWriter, req *http.Request) {
reply(w, 200, HandleRet{"foo": "1", "bar": "2"})
}
func (p *FooServer) postDump(w http.ResponseWriter, req *http.Request) {
req.Body.Close()
io.Copy(w, req.Body)
}
func (p *FooServer) RegisterHandlers(mux *http.ServeMux) {
mux.HandleFunc("/foo", func(w http.ResponseWriter, req *http.Request) { p.foo(w, req) })
mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { p.handle(w, req) })
mux.HandleFunc("/dump", func(w http.ResponseWriter, req *http.Request) { p.postDump(w, req) })
}
// --------------------------------------------------------------------
func TestBasic(t *testing.T) {
server := new(FooServer)
server.RegisterHandlers(http.DefaultServeMux)
mockhttp.ListenAndServe("foo.com", nil)
c := rpc.Client{mockhttp.DefaultClient}
{
var foo FooRet
err := c.Call(nil, &foo, "POST", "http://foo.com/foo")
if err != nil {
t.Fatal("call foo failed:", err)
}
if foo.A != 1 || foo.B != "foo.com" || foo.C != "/foo" {
t.Fatal("call foo: invalid ret")
}
fmt.Println(foo)
}
{
var ret map[string]string
err := c.Call(nil, &ret, "POST", "http://foo.com/bar")
if err != nil {
t.Fatal("call foo failed:", err)
}
if ret["foo"] != "1" || ret["bar"] != "2" {
t.Fatal("call bar: invalid ret")
}
fmt.Println(ret)
}
{
resp, err := c.Post("http://foo.com/dump", "", nil)
if err != nil {
t.Fatal("post foo failed:", err)
}
resp.Body.Close()
resp, err = c.Post("http://foo.com/dump", "", strings.NewReader("abc"))
if err != nil {
t.Fatal("post foo failed:", err)
}
defer resp.Body.Close()
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Fatal("ioutil.ReadAll:", err)
}
if len(b) != 0 {
t.Fatal("body should be empty:", string(b))
}
}
}
// --------------------------------------------------------------------

View File

@@ -1,52 +0,0 @@
package reqid
import (
"encoding/binary"
"encoding/base64"
"net/http"
"time"
. "golang.org/x/net/context"
)
// --------------------------------------------------------------------
var pid = uint32(time.Now().UnixNano() % 4294967291)
func genReqId() string {
var b [12]byte
binary.LittleEndian.PutUint32(b[:], pid)
binary.LittleEndian.PutUint64(b[4:], uint64(time.Now().UnixNano()))
return base64.URLEncoding.EncodeToString(b[:])
}
// --------------------------------------------------------------------
type key int // key is unexported and used for Context
const (
reqidKey key = 0
)
func NewContext(ctx Context, reqid string) Context {
return WithValue(ctx, reqidKey, reqid)
}
func NewContextWith(ctx Context, w http.ResponseWriter, req *http.Request) Context {
reqid := req.Header.Get("X-Reqid")
if reqid == "" {
reqid = genReqId()
req.Header.Set("X-Reqid", reqid)
}
h := w.Header()
h.Set("X-Reqid", reqid)
return WithValue(ctx, reqidKey, reqid)
}
func FromContext(ctx Context) (reqid string, ok bool) {
reqid, ok = ctx.Value(reqidKey).(string)
return
}
// --------------------------------------------------------------------
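A sketch of how the request id is threaded through an HTTP handler: NewContextWith reuses an incoming X-Reqid (or generates one), mirrors it on the response, and stores it in the context so the rpc client can forward it. The handler is illustrative; Context here is golang.org/x/net/context, as in the package above.
```
package main

import (
	"net/http"

	"golang.org/x/net/context"
	"qiniupkg.com/x/reqid.v7"
)

func handler(w http.ResponseWriter, req *http.Request) {
	// Attach (or reuse) the request id; it is echoed in the X-Reqid response header.
	ctx := reqid.NewContextWith(context.Background(), w, req)
	if id, ok := reqid.FromContext(ctx); ok {
		w.Write([]byte("reqid: " + id))
	}
}

func main() {
	http.HandleFunc("/", handler)
	http.ListenAndServe(":8080", nil)
}
```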

View File

@@ -1,107 +0,0 @@
package gob
import (
"bytes"
"encoding/gob"
"io"
"io/ioutil"
"net/http"
"strconv"
"qiniupkg.com/x/rpc.v7"
. "golang.org/x/net/context"
)
// ---------------------------------------------------------------------------
func Register(value interface{}) {
gob.Register(value)
}
func RegisterName(name string, value interface{}) {
gob.RegisterName(name, value)
}
// ---------------------------------------------------------------------------
func ResponseError(resp *http.Response) (err error) {
e := &rpc.ErrorInfo{
Reqid: resp.Header.Get("X-Reqid"),
Code: resp.StatusCode,
}
if resp.StatusCode > 299 {
e.Err = resp.Header.Get("X-Err")
if errno := resp.Header.Get("X-Errno"); errno != "" {
v, err2 := strconv.ParseInt(errno, 10, 32)
if err2 != nil {
e.Err = err2.Error()
}
e.Errno = int(v)
}
}
return e
}
func CallRet(ctx Context, ret interface{}, resp *http.Response) (err error) {
defer func() {
io.Copy(ioutil.Discard, resp.Body)
resp.Body.Close()
}()
if resp.StatusCode/100 == 2 {
if ret != nil && resp.ContentLength != 0 {
err = gob.NewDecoder(resp.Body).Decode(ret)
if err != nil {
return
}
}
if resp.StatusCode == 200 {
return nil
}
}
return ResponseError(resp)
}
// ---------------------------------------------------------------------------
type Client struct {
rpc.Client
}
var (
DefaultClient = Client{rpc.DefaultClient}
)
func (r Client) Call(
ctx Context, ret interface{}, method, url1 string) (err error) {
resp, err := r.DoRequestWith(ctx, method, url1, "application/gob", nil, 0)
if err != nil {
return err
}
return CallRet(ctx, ret, resp)
}
func (r Client) CallWithGob(
ctx Context, ret interface{}, method, url1 string, params interface{}) (err error) {
var b bytes.Buffer
err = gob.NewEncoder(&b).Encode(params)
if err != nil {
return err
}
resp, err := r.DoRequestWith(ctx, method, url1, "application/gob", &b, b.Len())
if err != nil {
return err
}
return CallRet(ctx, ret, resp)
}
// ---------------------------------------------------------------------------

View File

@@ -1,344 +0,0 @@
package rpc
import (
"bytes"
"encoding/json"
"errors"
"io"
"io/ioutil"
"net/http"
"net/url"
"strings"
"qiniupkg.com/x/reqid.v7"
. "golang.org/x/net/context"
)
var (
UserAgent = "Golang qiniu/rpc package"
)
var (
ErrInvalidRequestURL = errors.New("invalid request url")
)
// --------------------------------------------------------------------
type Client struct {
*http.Client
}
var (
DefaultClient = Client{&http.Client{Transport: http.DefaultTransport}}
)
// --------------------------------------------------------------------
func newRequest(method, url1 string, body io.Reader) (req *http.Request, err error) {
var host string
// url1 = "-H <Host> http://<ip>[:<port>]/<path>"
//
if strings.HasPrefix(url1, "-H") {
url2 := strings.TrimLeft(url1[2:], " \t")
pos := strings.Index(url2, " ")
if pos <= 0 {
return nil, ErrInvalidRequestURL
}
host = url2[:pos]
url1 = strings.TrimLeft(url2[pos+1:], " \t")
}
req, err = http.NewRequest(method, url1, body)
if err != nil {
return
}
if host != "" {
req.Host = host
}
return
}
func (r Client) DoRequest(ctx Context, method, url string) (resp *http.Response, err error) {
req, err := newRequest(method, url, nil)
if err != nil {
return
}
return r.Do(ctx, req)
}
func (r Client) DoRequestWith(
ctx Context, method, url1 string,
bodyType string, body io.Reader, bodyLength int) (resp *http.Response, err error) {
req, err := newRequest(method, url1, body)
if err != nil {
return
}
req.Header.Set("Content-Type", bodyType)
req.ContentLength = int64(bodyLength)
return r.Do(ctx, req)
}
func (r Client) DoRequestWith64(
ctx Context, method, url1 string,
bodyType string, body io.Reader, bodyLength int64) (resp *http.Response, err error) {
req, err := newRequest(method, url1, body)
if err != nil {
return
}
req.Header.Set("Content-Type", bodyType)
req.ContentLength = bodyLength
return r.Do(ctx, req)
}
func (r Client) DoRequestWithForm(
ctx Context, method, url1 string, data map[string][]string) (resp *http.Response, err error) {
msg := url.Values(data).Encode()
if method == "GET" || method == "HEAD" || method == "DELETE" {
if strings.ContainsRune(url1, '?') {
url1 += "&"
} else {
url1 += "?"
}
return r.DoRequest(ctx, method, url1 + msg)
}
return r.DoRequestWith(
ctx, method, url1, "application/x-www-form-urlencoded", strings.NewReader(msg), len(msg))
}
func (r Client) DoRequestWithJson(
ctx Context, method, url1 string, data interface{}) (resp *http.Response, err error) {
msg, err := json.Marshal(data)
if err != nil {
return
}
return r.DoRequestWith(
ctx, method, url1, "application/json", bytes.NewReader(msg), len(msg))
}
func (r Client) Do(ctx Context, req *http.Request) (resp *http.Response, err error) {
if ctx == nil {
ctx = Background()
}
if reqid, ok := reqid.FromContext(ctx); ok {
req.Header.Set("X-Reqid", reqid)
}
if _, ok := req.Header["User-Agent"]; !ok {
req.Header.Set("User-Agent", UserAgent)
}
transport := r.Transport // don't change r.Transport
if transport == nil {
transport = http.DefaultTransport
}
// best-effort check: fail fast if ctx was already cancelled before Do(req); this is not fully race-free
select {
case <-ctx.Done():
err = ctx.Err()
return
default:
}
if tr, ok := getRequestCanceler(transport); ok { // support CancelRequest
reqC := make(chan bool, 1)
go func() {
resp, err = r.Client.Do(req)
reqC <- true
}()
select {
case <-reqC:
case <-ctx.Done():
tr.CancelRequest(req)
<-reqC
err = ctx.Err()
}
} else {
resp, err = r.Client.Do(req)
}
return
}
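// Illustrative sketch, not part of the original file: cancelling a request
// through the context; the URL is an assumption. Cancelling before (or while)
// the call is in flight makes Do return ctx.Err(), and a transport that
// supports CancelRequest has the underlying HTTP request aborted as well.
func exampleCancel() error {
	ctx, cancel := WithCancel(Background())
	cancel()
	_, err := DefaultClient.DoRequest(ctx, "GET", "http://127.0.0.1:9000/slow")
	return err // context.Canceled
}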
// --------------------------------------------------------------------
type ErrorInfo struct {
Err string `json:"error,omitempty"`
Key string `json:"key,omitempty"`
Reqid string `json:"reqid,omitempty"`
Errno int `json:"errno,omitempty"`
Code int `json:"code"`
}
func (r *ErrorInfo) ErrorDetail() string {
msg, _ := json.Marshal(r)
return string(msg)
}
func (r *ErrorInfo) Error() string {
return r.Err
}
func (r *ErrorInfo) RpcError() (code, errno int, key, err string) {
return r.Code, r.Errno, r.Key, r.Err
}
func (r *ErrorInfo) HttpCode() int {
return r.Code
}
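// Illustrative sketch, not part of the original file: callers can type-assert
// the error returned by CallRet (and the Call* helpers) back to *ErrorInfo to
// read the HTTP status code and the X-Reqid of the failed request.
func exampleInspectError(err error) (code int, reqid string) {
	if info, ok := err.(*ErrorInfo); ok {
		return info.HttpCode(), info.Reqid
	}
	return 0, ""
}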
// --------------------------------------------------------------------
func parseError(e *ErrorInfo, r io.Reader) {
body, err1 := ioutil.ReadAll(r)
if err1 != nil {
e.Err = err1.Error()
return
}
var ret struct {
Err string `json:"error"`
Key string `json:"key"`
Errno int `json:"errno"`
}
if json.Unmarshal(body, &ret) == nil && ret.Err != "" {
// the body follows the qiniu error message convention; surface its fields
e.Err, e.Key, e.Errno = ret.Err, ret.Key, ret.Errno
return
}
e.Err = string(body)
}
func ResponseError(resp *http.Response) (err error) {
e := &ErrorInfo{
Reqid: resp.Header.Get("X-Reqid"),
Code: resp.StatusCode,
}
if resp.StatusCode > 299 {
if resp.ContentLength != 0 {
ct, ok := resp.Header["Content-Type"]
if ok && strings.HasPrefix(ct[0], "application/json") {
parseError(e, resp.Body)
}
}
}
return e
}
func CallRet(ctx Context, ret interface{}, resp *http.Response) (err error) {
defer func() {
io.Copy(ioutil.Discard, resp.Body)
resp.Body.Close()
}()
if resp.StatusCode/100 == 2 {
if ret != nil && resp.ContentLength != 0 {
err = json.NewDecoder(resp.Body).Decode(ret)
if err != nil {
return
}
}
if resp.StatusCode == 200 {
return nil
}
}
return ResponseError(resp)
}
func (r Client) CallWithForm(
ctx Context, ret interface{}, method, url1 string, param map[string][]string) (err error) {
resp, err := r.DoRequestWithForm(ctx, method, url1, param)
if err != nil {
return err
}
return CallRet(ctx, ret, resp)
}
func (r Client) CallWithJson(
ctx Context, ret interface{}, method, url1 string, param interface{}) (err error) {
resp, err := r.DoRequestWithJson(ctx, method, url1, param)
if err != nil {
return err
}
return CallRet(ctx, ret, resp)
}
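// Illustrative sketch, not part of the original file: a typical CallWithJson
// round trip. The URL and the statsReq/statsRet shapes are assumptions.
type statsReq struct {
	Bucket string `json:"bucket"`
}

type statsRet struct {
	Count int64 `json:"count"`
}

func exampleStats(ctx Context) (int64, error) {
	var ret statsRet
	// POST the JSON-encoded body; on success CallRet decodes the JSON response
	// into ret, on failure the returned error is a *ErrorInfo.
	err := DefaultClient.CallWithJson(ctx, &ret, "POST", "http://127.0.0.1:9000/stats", statsReq{Bucket: "test"})
	return ret.Count, err
}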
func (r Client) CallWith(
ctx Context, ret interface{}, method, url1, bodyType string, body io.Reader, bodyLength int) (err error) {
resp, err := r.DoRequestWith(ctx, method, url1, bodyType, body, bodyLength)
if err != nil {
return err
}
return CallRet(ctx, ret, resp)
}
func (r Client) CallWith64(
ctx Context, ret interface{}, method, url1, bodyType string, body io.Reader, bodyLength int64) (err error) {
resp, err := r.DoRequestWith64(ctx, method, url1, bodyType, body, bodyLength)
if err != nil {
return err
}
return CallRet(ctx, ret, resp)
}
func (r Client) Call(
ctx Context, ret interface{}, method, url1 string) (err error) {
resp, err := r.DoRequestWith(ctx, method, url1, "application/x-www-form-urlencoded", nil, 0)
if err != nil {
return err
}
return CallRet(ctx, ret, resp)
}
// ---------------------------------------------------------------------------
type requestCanceler interface {
CancelRequest(req *http.Request)
}
type nestedObjectGetter interface {
NestedObject() interface{}
}
func getRequestCanceler(tp http.RoundTripper) (rc requestCanceler, ok bool) {
if rc, ok = tp.(requestCanceler); ok {
return
}
p := interface{}(tp)
for {
getter, ok1 := p.(nestedObjectGetter)
if !ok1 {
return
}
p = getter.NestedObject()
if rc, ok = p.(requestCanceler); ok {
return
}
}
}
// --------------------------------------------------------------------

View File

@@ -1,66 +0,0 @@
package rpc
import (
"fmt"
"net/http"
"testing"
)
// --------------------------------------------------------------------
func TestNewRequest(t *testing.T) {
req, err := http.NewRequest("GET", "-H\t abc.com \thttp://127.0.0.1/foo/bar", nil)
if err != nil {
t.Fatal("http.NewRequest failed")
}
if req.Host != "" {
t.Fatal(`http.NewRequest: req.Host != ""`)
}
req, err = newRequest("GET", "-H\t abc.com \thttp://127.0.0.1/foo/bar", nil)
if err != nil {
t.Fatal("newRequest failed:", err)
}
fmt.Println("Host:", req.Host, "path:", req.URL.Path, "url.host:", req.URL.Host)
if req.Host != "abc.com" || req.URL.Path != "/foo/bar" || req.URL.Host != "127.0.0.1" {
t.Fatal(`req.Host != "abc.com" || req.URL.Path != "/foo/bar" || req.URL.Host != "127.0.0.1"`)
}
}
// --------------------------------------------------------------------
type transport struct {
a http.RoundTripper
}
func (p *transport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
return p.a.RoundTrip(req)
}
func (p *transport) NestedObject() interface{} {
return p.a
}
func Test_getRequestCanceler(t *testing.T) {
p := &transport{a: http.DefaultTransport}
if _, ok := getRequestCanceler(p); !ok {
t.Fatal("getRequestCanceler failed")
}
p2 := &transport{a: p}
if _, ok := getRequestCanceler(p2); !ok {
t.Fatal("getRequestCanceler(p2) failed")
}
p3 := &transport{}
if _, ok := getRequestCanceler(p3); ok {
t.Fatal("getRequestCanceler(p3)?")
}
}
// --------------------------------------------------------------------

Some files were not shown because too many files have changed in this diff.