add vendor & update README.md

This commit is contained in:
henry.chen
2018-11-19 10:24:54 +08:00
parent 010137ebf5
commit 24d81db8be
674 changed files with 301356 additions and 22 deletions

2
vendor/github.com/qiniu/api.v7/auth/qbox/doc.go generated vendored Normal file
View File

@@ -0,0 +1,2 @@
// Package qbox provides the authorization/signing helpers needed by this SDK.
package qbox

147
vendor/github.com/qiniu/api.v7/auth/qbox/qbox_auth.go generated vendored Normal file
View File

@@ -0,0 +1,147 @@
package qbox
import (
"crypto/hmac"
"crypto/sha1"
"encoding/base64"
"fmt"
"io"
"net/http"
"github.com/qiniu/api.v7/conf"
"github.com/qiniu/x/bytes.v7/seekable"
)
// Mac 七牛AK/SK的对象AK/SK可以从 https://portal.qiniu.com/user/key 获取。
type Mac struct {
AccessKey string
SecretKey []byte
}
// NewMac 构建一个新的拥有AK/SK的对象
func NewMac(accessKey, secretKey string) (mac *Mac) {
return &Mac{accessKey, []byte(secretKey)}
}
// Sign 对数据进行签名,一般用于私有空间下载用途
func (mac *Mac) Sign(data []byte) (token string) {
h := hmac.New(sha1.New, mac.SecretKey)
h.Write(data)
sign := base64.URLEncoding.EncodeToString(h.Sum(nil))
return fmt.Sprintf("%s:%s", mac.AccessKey, sign)
}
// SignWithData 对数据进行签名,一般用于上传凭证的生成用途
func (mac *Mac) SignWithData(b []byte) (token string) {
encodedData := base64.URLEncoding.EncodeToString(b)
h := hmac.New(sha1.New, mac.SecretKey)
h.Write([]byte(encodedData))
digest := h.Sum(nil)
sign := base64.URLEncoding.EncodeToString(digest)
return fmt.Sprintf("%s:%s:%s", mac.AccessKey, sign, encodedData)
}
// SignRequest signs an HTTP request, typically to mint a management token.
// The signed payload is "<path>[?<query>]\n" plus, for form-encoded
// requests (see incBody), the request body.
func (mac *Mac) SignRequest(req *http.Request) (token string, err error) {
	digest := hmac.New(sha1.New, mac.SecretKey)

	pathAndQuery := req.URL.Path
	if q := req.URL.RawQuery; q != "" {
		pathAndQuery += "?" + q
	}
	io.WriteString(digest, pathAndQuery+"\n")

	if incBody(req) {
		body, sErr := seekable.New(req)
		if sErr != nil {
			return "", sErr
		}
		digest.Write(body.Bytes())
	}

	sign := base64.URLEncoding.EncodeToString(digest.Sum(nil))
	token = mac.AccessKey + ":" + sign
	return
}
// SignRequestV2 signs an HTTP request with the V2 format, typically to mint
// an advanced management token. The signed payload is assembled in this
// exact order:
//
//	<Method> <Path>[?<RawQuery>]
//	Host: <Host>
//	[Content-Type: <Content-Type>]
//	<blank line>
//	[<Body>]   (only for content types accepted by incBodyV2)
func (mac *Mac) SignRequestV2(req *http.Request) (token string, err error) {
	h := hmac.New(sha1.New, mac.SecretKey)
	u := req.URL

	// write "<method> <path>[?<query>]"
	io.WriteString(h, fmt.Sprintf("%s %s", req.Method, u.Path))
	if u.RawQuery != "" {
		io.WriteString(h, "?")
		io.WriteString(h, u.RawQuery)
	}

	// write the Host header
	io.WriteString(h, "\nHost: ")
	io.WriteString(h, req.Host)

	// write the Content-Type header, when present
	contentType := req.Header.Get("Content-Type")
	if contentType != "" {
		io.WriteString(h, "\n")
		io.WriteString(h, fmt.Sprintf("Content-Type: %s", contentType))
	}
	io.WriteString(h, "\n\n")

	// write the request body for signable content types
	if incBodyV2(req) {
		s2, err2 := seekable.New(req)
		if err2 != nil {
			return "", err2
		}
		h.Write(s2.Bytes())
	}

	sign := base64.URLEncoding.EncodeToString(h.Sum(nil))
	token = fmt.Sprintf("%s:%s", mac.AccessKey, sign)
	return
}
// incBody reports whether the request body should be signed together with
// the (V1) management token: only non-nil form-encoded bodies qualify.
func incBody(req *http.Request) bool {
	return req.Body != nil && req.Header.Get("Content-Type") == conf.CONTENT_TYPE_FORM
}

// incBodyV2 reports whether the request body should be signed together with
// a V2 management token: form-encoded and JSON bodies qualify.
func incBodyV2(req *http.Request) bool {
	contentType := req.Header.Get("Content-Type")
	return req.Body != nil && (contentType == conf.CONTENT_TYPE_FORM || contentType == conf.CONTENT_TYPE_JSON)
}
// VerifyCallback reports whether an upload-callback request genuinely
// originates from Qiniu, by re-signing the request and comparing the result
// against its Authorization header.
func (mac *Mac) VerifyCallback(req *http.Request) (bool, error) {
	auth := req.Header.Get("Authorization")
	if auth == "" {
		return false, nil
	}

	token, err := mac.SignRequest(req)
	if err != nil {
		return false, err
	}

	// Compare in constant time so this authentication check does not leak
	// timing information about the expected signature.
	return hmac.Equal([]byte(auth), []byte("QBox "+token)), nil
}
// Sign signs data with mac, typically for download tokens. It is a
// package-level convenience wrapper around (*Mac).Sign.
func Sign(mac *Mac, data []byte) string {
	return mac.Sign(data)
}

// SignWithData signs data with mac, typically for upload tokens. It is a
// package-level convenience wrapper around (*Mac).SignWithData.
func SignWithData(mac *Mac, data []byte) string {
	return mac.SignWithData(data)
}

// VerifyCallback reports whether an upload-callback request genuinely
// originates from Qiniu. It is a package-level convenience wrapper around
// (*Mac).VerifyCallback.
func VerifyCallback(mac *Mac, req *http.Request) (bool, error) {
	return mac.VerifyCallback(req)
}

9
vendor/github.com/qiniu/api.v7/conf/conf.go generated vendored Normal file
View File

@@ -0,0 +1,9 @@
package conf

// Version is the version of this SDK.
const Version = "7.3.0"

// Content-Type values used by API requests.
const (
	CONTENT_TYPE_JSON  = "application/json"
	CONTENT_TYPE_FORM  = "application/x-www-form-urlencoded"
	CONTENT_TYPE_OCTET = "application/octet-stream"
)

2
vendor/github.com/qiniu/api.v7/conf/doc.go generated vendored Normal file
View File

@@ -0,0 +1,2 @@
// Package conf provides a way to set the APP name. The APP name is put into
// the User-Agent header of API requests, which makes later log analysis and
// troubleshooting easier.
package conf

173
vendor/github.com/qiniu/api.v7/storage/base64_upload.go generated vendored Normal file
View File

@@ -0,0 +1,173 @@
package storage
import (
"bytes"
"context"
"encoding/base64"
"fmt"
"hash/crc32"
"io"
"net/http"
"strconv"
"strings"
)
// Base64Uploader uploads files whose content is supplied as base64 data.
type Base64Uploader struct {
	client *Client
	cfg    *Config
}

// NewBase64Uploader builds a Base64Uploader that uses the default client.
func NewBase64Uploader(cfg *Config) *Base64Uploader {
	return NewBase64UploaderEx(cfg, nil)
}

// NewBase64UploaderEx builds a Base64Uploader with an explicit client; a nil
// cfg or client falls back to the package defaults.
func NewBase64UploaderEx(cfg *Config, client *Client) *Base64Uploader {
	if cfg == nil {
		cfg = &Config{}
	}
	if client == nil {
		client = &DefaultClient
	}
	return &Base64Uploader{
		client: client,
		cfg:    cfg,
	}
}
// Base64PutExtra carries the optional settings of a base64 upload.
type Base64PutExtra struct {
	// Optional. Custom user parameters; keys must start with "x:",
	// any other key is ignored.
	Params map[string]string

	// Optional. When empty, the server detects the MIME type automatically.
	MimeType string
}
// Put uploads a file supplied as base64 data.
//
// ctx is the request context.
// ret receives the response data; unless the uptoken customizes callbackUrl
// or returnBody, its structure is PutRet.
// uptoken is the upload token issued by the business server.
// key is the access path of the file, e.g. "foo/bar.jpg"; keys should not
// start with '/', and an empty key is legal.
// base64Data is the base64-encoded content to upload (e.g. image data).
// extra holds optional settings and may be nil; see Base64PutExtra.
func (p *Base64Uploader) Put(
	ctx context.Context, ret interface{}, uptoken, key string, base64Data []byte, extra *Base64PutExtra) (err error) {
	return p.put(ctx, ret, uptoken, key, true, base64Data, extra)
}

// PutWithoutKey uploads a file supplied as base64 data, storing it under its
// content hash instead of an explicit key.
func (p *Base64Uploader) PutWithoutKey(
	ctx context.Context, ret interface{}, uptoken string, base64Data []byte, extra *Base64PutExtra) (err error) {
	return p.put(ctx, ret, uptoken, "", false, base64Data, extra)
}
// put implements Put and PutWithoutKey: it validates the base64 payload,
// computes the raw size and CRC32, assembles the /putb64 request path and
// posts the data to the upload host resolved for the token's bucket.
func (p *Base64Uploader) put(
	ctx context.Context, ret interface{}, uptoken, key string, hasKey bool, base64Data []byte, extra *Base64PutExtra) (err error) {
	// resolve the upload host from the token's access key and bucket
	ak, bucket, gErr := getAkBucketFromUploadToken(uptoken)
	if gErr != nil {
		err = gErr
		return
	}
	var upHost string
	upHost, err = p.upHost(ak, bucket)
	if err != nil {
		return
	}

	// set default extra
	if extra == nil {
		extra = &Base64PutExtra{}
	}

	// decode once to both validate the payload and obtain raw size + CRC32
	h := crc32.NewIEEE()
	rawReader := base64.NewDecoder(base64.StdEncoding, bytes.NewReader(base64Data))
	fsize, decodeErr := io.Copy(h, rawReader)
	if decodeErr != nil {
		err = fmt.Errorf("invalid base64 data, %s", decodeErr.Error())
		return
	}
	fCrc32 := h.Sum32()

	postPath := bytes.NewBufferString("/putb64")
	// add fsize; FormatInt keeps the full int64 value, whereas the previous
	// strconv.Itoa(int(fsize)) truncated on 32-bit platforms
	postPath.WriteString("/")
	postPath.WriteString(strconv.FormatInt(fsize, 10))
	// add key
	if hasKey {
		postPath.WriteString("/key/")
		postPath.WriteString(base64.URLEncoding.EncodeToString([]byte(key)))
	}
	// add mimeType
	if extra.MimeType != "" {
		postPath.WriteString("/mimeType/")
		postPath.WriteString(base64.URLEncoding.EncodeToString([]byte(extra.MimeType)))
	}
	// add crc32
	postPath.WriteString("/crc32/")
	postPath.WriteString(strconv.FormatUint(uint64(fCrc32), 10))
	// add custom "x:" params; ranging over a nil/empty map is a no-op
	for k, v := range extra.Params {
		if strings.HasPrefix(k, "x:") && v != "" {
			postPath.WriteString("/")
			postPath.WriteString(k)
			postPath.WriteString("/")
			postPath.WriteString(base64.URLEncoding.EncodeToString([]byte(v)))
		}
	}

	postURL := upHost + postPath.String()
	headers := http.Header{}
	headers.Add("Content-Type", "application/octet-stream")
	headers.Add("Authorization", "UpToken "+uptoken)
	return p.client.CallWith(ctx, ret, "POST", postURL, headers, bytes.NewReader(base64Data), len(base64Data))
}
// upHost resolves the scheme and host that uploads should be sent to,
// honoring the configured zone, HTTPS and CDN preferences.
func (p *Base64Uploader) upHost(ak, bucket string) (upHost string, err error) {
	zone := p.cfg.Zone
	if zone == nil {
		z, zoneErr := GetZone(ak, bucket)
		if zoneErr != nil {
			err = zoneErr
			return
		}
		zone = z
	}

	scheme := "http://"
	if p.cfg.UseHTTPS {
		scheme = "https://"
	}

	host := zone.SrcUpHosts[0]
	if p.cfg.UseCdnDomains {
		host = zone.CdnUpHosts[0]
	}

	upHost = scheme + host
	return
}

606
vendor/github.com/qiniu/api.v7/storage/bucket.go generated vendored Normal file
View File

@@ -0,0 +1,606 @@
package storage
import (
"context"
"encoding/base64"
"errors"
"fmt"
"net/url"
"strconv"
"strings"
"github.com/qiniu/api.v7/auth/qbox"
"github.com/qiniu/api.v7/conf"
"net/http"
)
// Default hosts of the resource management services.
const (
	DefaultRsHost  = "rs.qiniu.com"
	DefaultRsfHost = "rsf.qiniu.com"
	DefaultAPIHost = "api.qiniu.com"
	DefaultPubHost = "pu.qbox.me:10200"
)
// FileInfo describes the basic attributes of a stored file.
type FileInfo struct {
	Hash     string `json:"hash"`
	Fsize    int64  `json:"fsize"`
	PutTime  int64  `json:"putTime"`
	MimeType string `json:"mimeType"`
	Type     int    `json:"type"`
}

// String renders the file info as one "Name: value" line per field.
func (f *FileInfo) String() string {
	return fmt.Sprintf(
		"Hash: %s\nFsize: %d\nPutTime: %d\nMimeType: %s\nType: %d\n",
		f.Hash, f.Fsize, f.PutTime, f.MimeType, f.Type)
}
// FetchRet is the response of a resource-fetch request.
type FetchRet struct {
	Hash     string `json:"hash"`
	Fsize    int64  `json:"fsize"`
	MimeType string `json:"mimeType"`
	Key      string `json:"key"`
}

// String renders the fetch result as one "Name: value" line per field.
func (r *FetchRet) String() string {
	return fmt.Sprintf(
		"Key: %s\nHash: %s\nFsize: %d\nMimeType: %s\n",
		r.Key, r.Hash, r.Fsize, r.MimeType)
}
// ListItem is one entry of a file listing.
type ListItem struct {
	Key      string `json:"key"`
	Hash     string `json:"hash"`
	Fsize    int64  `json:"fsize"`
	PutTime  int64  `json:"putTime"`
	MimeType string `json:"mimeType"`
	Type     int    `json:"type"`
	EndUser  string `json:"endUser"`
}

// String renders the entry as one "Name: value" line per field; note the
// Key field is not part of the output.
func (l *ListItem) String() string {
	return fmt.Sprintf(
		"Hash: %s\nFsize: %d\nPutTime: %d\nMimeType: %s\nType: %d\nEndUser: %s\n",
		l.Hash, l.Fsize, l.PutTime, l.MimeType, l.Type, l.EndUser)
}
// BatchOpRet is the result of one operation inside a batch request.
// Batch supports the stat, copy, delete, move, chgm, chtype and
// deleteAfterDays operations. For stat, the file's basic info is returned
// when the file exists, otherwise an error. For the other operations a code
// is returned on success; on failure an error message is returned as well,
// which can be used to diagnose the problem.
type BatchOpRet struct {
	Code int `json:"code,omitempty"`
	Data struct {
		Hash     string `json:"hash"`
		Fsize    int64  `json:"fsize"`
		PutTime  int64  `json:"putTime"`
		MimeType string `json:"mimeType"`
		Type     int    `json:"type"`
		Error    string `json:"error"`
	} `json:"data,omitempty"`
}

// BucketManager provides the resource management operations.
type BucketManager struct {
	client *Client
	mac    *qbox.Mac
	cfg    *Config
}
// NewBucketManager builds a BucketManager that uses the default client.
func NewBucketManager(mac *qbox.Mac, cfg *Config) *BucketManager {
	return NewBucketManagerEx(mac, cfg, nil)
}

// NewBucketManagerEx builds a BucketManager with an explicit client; a nil
// cfg or client falls back to the package defaults.
func NewBucketManagerEx(mac *qbox.Mac, cfg *Config, client *Client) *BucketManager {
	if cfg == nil {
		cfg = &Config{}
	}
	if client == nil {
		client = &DefaultClient
	}
	return &BucketManager{
		client: client,
		mac:    mac,
		cfg:    cfg,
	}
}
// Buckets lists the buckets of the account; when shared is true, buckets
// the account was granted access to are listed as well.
func (m *BucketManager) Buckets(shared bool) (buckets []string, err error) {
	// NOTE(review): a plain string context key trips go vet (SA1029); the
	// client presumably reads the credential back under "mac" — confirm
	// before introducing a typed key.
	ctx := context.WithValue(context.TODO(), "mac", m.mac)
	var reqHost string

	scheme := "http://"
	if m.cfg.UseHTTPS {
		scheme = "https://"
	}

	reqHost = fmt.Sprintf("%s%s", scheme, DefaultRsHost)
	reqURL := fmt.Sprintf("%s/buckets?shared=%v", reqHost, shared)
	headers := http.Header{}
	headers.Add("Content-Type", conf.CONTENT_TYPE_FORM)
	err = m.client.Call(ctx, &buckets, "POST", reqURL, headers)
	return
}
// Stat returns the basic information of a file.
func (m *BucketManager) Stat(bucket, key string) (info FileInfo, err error) {
	err = m.rsCall(bucket, URIStat(bucket, key), &info)
	return
}

// Delete removes a file from a bucket.
func (m *BucketManager) Delete(bucket, key string) (err error) {
	return m.rsCall(bucket, URIDelete(bucket, key), nil)
}

// Copy creates a new copy of an existing file.
func (m *BucketManager) Copy(srcBucket, srcKey, destBucket, destKey string, force bool) (err error) {
	return m.rsCall(srcBucket, URICopy(srcBucket, srcKey, destBucket, destKey, force), nil)
}

// Move moves a file to another bucket and/or renames it.
func (m *BucketManager) Move(srcBucket, srcKey, destBucket, destKey string, force bool) (err error) {
	return m.rsCall(srcBucket, URIMove(srcBucket, srcKey, destBucket, destKey, force), nil)
}

// ChangeMime updates the MIME type of a file.
func (m *BucketManager) ChangeMime(bucket, key, newMime string) (err error) {
	return m.rsCall(bucket, URIChangeMime(bucket, key, newMime), nil)
}

// ChangeType updates the storage class of a file (0 = standard storage,
// 1 = infrequent-access storage).
func (m *BucketManager) ChangeType(bucket, key string, fileType int) (err error) {
	return m.rsCall(bucket, URIChangeType(bucket, key, fileType), nil)
}

// DeleteAfterDays updates the lifecycle of a file; days == 0 cancels the
// scheduled deletion so the file is stored permanently.
func (m *BucketManager) DeleteAfterDays(bucket, key string, days int) (err error) {
	return m.rsCall(bucket, URIDeleteAfterDays(bucket, key, days), nil)
}

// rsCall posts a single rs-API command for bucket to the bucket's rs host,
// decoding the response into ret (which may be nil). It factors out the
// request boilerplate previously duplicated across the seven management
// methods above.
func (m *BucketManager) rsCall(bucket, uri string, ret interface{}) (err error) {
	ctx := context.WithValue(context.TODO(), "mac", m.mac)
	reqHost, reqErr := m.rsHost(bucket)
	if reqErr != nil {
		err = reqErr
		return
	}
	reqURL := fmt.Sprintf("%s%s", reqHost, uri)
	headers := http.Header{}
	headers.Add("Content-Type", conf.CONTENT_TYPE_FORM)
	err = m.client.Call(ctx, ret, "POST", reqURL, headers)
	return
}
// Batch executes up to 1000 management operations in one request; the
// supported commands are stat, copy, move, delete, chgm, chtype and
// deleteAfterDays (build them with the URIXxx helpers).
// NOTE(review): unlike the single-file methods, Batch always targets
// DefaultRsHost rather than the bucket's zone host.
func (m *BucketManager) Batch(operations []string) (batchOpRet []BatchOpRet, err error) {
	if len(operations) > 1000 {
		err = errors.New("batch operation count exceeds the limit of 1000")
		return
	}
	ctx := context.WithValue(context.TODO(), "mac", m.mac)

	scheme := "http://"
	if m.cfg.UseHTTPS {
		scheme = "https://"
	}

	reqURL := fmt.Sprintf("%s%s/batch", scheme, DefaultRsHost)
	params := map[string][]string{
		"op": operations,
	}
	headers := http.Header{}
	headers.Add("Content-Type", conf.CONTENT_TYPE_FORM)
	err = m.client.CallWithForm(ctx, &batchOpRet, "POST", reqURL, headers, params)
	return
}
// Fetch grabs a file from the given remote URL into the bucket, saving it
// under the specified key.
func (m *BucketManager) Fetch(resURL, bucket, key string) (fetchRet FetchRet, err error) {
	ctx := context.WithValue(context.TODO(), "mac", m.mac)
	reqHost, reqErr := m.iovipHost(bucket)
	if reqErr != nil {
		err = reqErr
		return
	}
	reqURL := fmt.Sprintf("%s%s", reqHost, uriFetch(resURL, bucket, key))
	headers := http.Header{}
	headers.Add("Content-Type", conf.CONTENT_TYPE_FORM)
	err = m.client.Call(ctx, &fetchRet, "POST", reqURL, headers)
	return
}

// FetchWithoutKey grabs a file from the given remote URL into the bucket,
// saving it under its content hash.
func (m *BucketManager) FetchWithoutKey(resURL, bucket string) (fetchRet FetchRet, err error) {
	ctx := context.WithValue(context.TODO(), "mac", m.mac)
	reqHost, reqErr := m.iovipHost(bucket)
	if reqErr != nil {
		err = reqErr
		return
	}
	reqURL := fmt.Sprintf("%s%s", reqHost, uriFetchWithoutKey(resURL, bucket))
	headers := http.Header{}
	headers.Add("Content-Type", conf.CONTENT_TYPE_FORM)
	err = m.client.Call(ctx, &fetchRet, "POST", reqURL, headers)
	return
}

// Prefetch re-syncs a file in a mirror-enabled bucket from the mirror origin.
func (m *BucketManager) Prefetch(bucket, key string) (err error) {
	ctx := context.WithValue(context.TODO(), "mac", m.mac)
	reqHost, reqErr := m.iovipHost(bucket)
	if reqErr != nil {
		err = reqErr
		return
	}
	reqURL := fmt.Sprintf("%s%s", reqHost, uriPrefetch(bucket, key))
	headers := http.Header{}
	headers.Add("Content-Type", conf.CONTENT_TYPE_FORM)
	err = m.client.Call(ctx, nil, "POST", reqURL, headers)
	return
}
// SetImage sets the mirror origin of a bucket.
func (m *BucketManager) SetImage(siteURL, bucket string) (err error) {
	ctx := context.WithValue(context.TODO(), "mac", m.mac)
	reqURL := fmt.Sprintf("http://%s%s", DefaultPubHost, uriSetImage(siteURL, bucket))
	headers := http.Header{}
	headers.Add("Content-Type", conf.CONTENT_TYPE_FORM)
	err = m.client.Call(ctx, nil, "POST", reqURL, headers)
	return
}

// SetImageWithHost sets the mirror origin of a bucket, additionally
// specifying the Host header to use when fetching from the origin.
func (m *BucketManager) SetImageWithHost(siteURL, bucket, host string) (err error) {
	ctx := context.WithValue(context.TODO(), "mac", m.mac)
	reqURL := fmt.Sprintf("http://%s%s", DefaultPubHost,
		uriSetImageWithHost(siteURL, bucket, host))
	headers := http.Header{}
	headers.Add("Content-Type", conf.CONTENT_TYPE_FORM)
	err = m.client.Call(ctx, nil, "POST", reqURL, headers)
	return
}

// UnsetImage removes the mirror origin setting of a bucket.
func (m *BucketManager) UnsetImage(bucket string) (err error) {
	ctx := context.WithValue(context.TODO(), "mac", m.mac)
	reqURL := fmt.Sprintf("http://%s%s", DefaultPubHost, uriUnsetImage(bucket))
	headers := http.Header{}
	headers.Add("Content-Type", conf.CONTENT_TYPE_FORM)
	err = m.client.Call(ctx, nil, "POST", reqURL, headers)
	return err
}
// listFilesRet is the raw response of the list API.
type listFilesRet struct {
	Marker         string     `json:"marker"`
	Items          []ListItem `json:"items"`
	CommonPrefixes []string   `json:"commonPrefixes"`
}

// ListFiles lists the files of a bucket. prefix filters by key prefix,
// delimiter groups keys into common prefixes ("directories"), marker is the
// position to resume from on the next call, and limit is the maximum number
// of entries per call (1 to 1000).
func (m *BucketManager) ListFiles(bucket, prefix, delimiter, marker string,
	limit int) (entries []ListItem, commonPrefixes []string, nextMarker string, hasNext bool, err error) {
	if limit <= 0 || limit > 1000 {
		err = errors.New("invalid list limit, only allow [1, 1000]")
		return
	}
	ctx := context.WithValue(context.TODO(), "mac", m.mac)
	reqHost, reqErr := m.rsfHost(bucket)
	if reqErr != nil {
		err = reqErr
		return
	}
	ret := listFilesRet{}
	reqURL := fmt.Sprintf("%s%s", reqHost, uriListFiles(bucket, prefix, delimiter, marker, limit))
	headers := http.Header{}
	headers.Add("Content-Type", conf.CONTENT_TYPE_FORM)
	err = m.client.Call(ctx, &ret, "POST", reqURL, headers)
	if err != nil {
		return
	}
	commonPrefixes = ret.CommonPrefixes
	nextMarker = ret.Marker
	entries = ret.Items
	// a non-empty marker means the listing is not finished yet
	if ret.Marker != "" {
		hasNext = true
	}
	return
}
// AsyncFetchParam is the request of an asynchronous fetch job.
type AsyncFetchParam struct {
	Url              string `json:"url"`
	Host             string `json:"host,omitempty"`
	Bucket           string `json:"bucket"`
	Key              string `json:"key,omitempty"`
	Md5              string `json:"md5,omitempty"`
	Etag             string `json:"etag,omitempty"`
	CallbackURL      string `json:"callbackurl,omitempty"`
	CallbackBody     string `json:"callbackbody,omitempty"`
	CallbackBodyType string `json:"callbackbodytype,omitempty"`
	FileType         int    `json:"file_type,omitempty"`
}

// AsyncFetchRet is the response of an asynchronous fetch job: the job id
// and the "wait" value reported by the service.
type AsyncFetchRet struct {
	Id   string `json:"id"`
	Wait int    `json:"wait"`
}

// AsyncFetch submits an asynchronous fetch job to the api host of the
// bucket's zone.
func (m *BucketManager) AsyncFetch(param AsyncFetchParam) (ret AsyncFetchRet, err error) {
	reqUrl, err := m.apiHost(param.Bucket)
	if err != nil {
		return
	}
	reqUrl += "/sisyphus/fetch"
	ctx := context.WithValue(context.TODO(), "mac", m.mac)
	headers := http.Header{}
	headers.Add("Content-Type", conf.CONTENT_TYPE_JSON)
	err = m.client.CallWithJson(ctx, &ret, "POST", reqUrl, headers, param)
	return
}
// rsHost resolves the rs (management) host of the bucket's zone.
func (m *BucketManager) rsHost(bucket string) (rsHost string, err error) {
	zone, err := m.Zone(bucket)
	if err != nil {
		return
	}
	rsHost = zone.GetRsHost(m.cfg.UseHTTPS)
	return
}

// rsfHost resolves the rsf (listing) host of the bucket's zone.
func (m *BucketManager) rsfHost(bucket string) (rsfHost string, err error) {
	zone, err := m.Zone(bucket)
	if err != nil {
		return
	}
	rsfHost = zone.GetRsfHost(m.cfg.UseHTTPS)
	return
}

// iovipHost resolves the io host of the bucket's zone.
func (m *BucketManager) iovipHost(bucket string) (iovipHost string, err error) {
	zone, err := m.Zone(bucket)
	if err != nil {
		return
	}
	iovipHost = zone.GetIoHost(m.cfg.UseHTTPS)
	return
}

// apiHost resolves the api host of the bucket's zone.
func (m *BucketManager) apiHost(bucket string) (apiHost string, err error) {
	zone, err := m.Zone(bucket)
	if err != nil {
		return
	}
	apiHost = zone.GetApiHost(m.cfg.UseHTTPS)
	return
}

// Zone returns the configured zone when one is set, otherwise looks it up
// from the access key and bucket.
func (m *BucketManager) Zone(bucket string) (z *Zone, err error) {
	if m.cfg.Zone != nil {
		z = m.cfg.Zone
		return
	}
	z, err = GetZone(m.mac.AccessKey, bucket)
	return
}
// Builders of the op commands. The exported ones may also be used inside a
// Batch request.

// URIStat builds a stat command.
func URIStat(bucket, key string) string {
	return fmt.Sprintf("/stat/%s", EncodedEntry(bucket, key))
}

// URIDelete builds a delete command.
func URIDelete(bucket, key string) string {
	return fmt.Sprintf("/delete/%s", EncodedEntry(bucket, key))
}

// URICopy builds a copy command.
func URICopy(srcBucket, srcKey, destBucket, destKey string, force bool) string {
	return fmt.Sprintf("/copy/%s/%s/force/%v", EncodedEntry(srcBucket, srcKey),
		EncodedEntry(destBucket, destKey), force)
}

// URIMove builds a move command.
func URIMove(srcBucket, srcKey, destBucket, destKey string, force bool) string {
	return fmt.Sprintf("/move/%s/%s/force/%v", EncodedEntry(srcBucket, srcKey),
		EncodedEntry(destBucket, destKey), force)
}

// URIDeleteAfterDays builds a deleteAfterDays command.
func URIDeleteAfterDays(bucket, key string, days int) string {
	return fmt.Sprintf("/deleteAfterDays/%s/%d", EncodedEntry(bucket, key), days)
}

// URIChangeMime builds a chgm command.
func URIChangeMime(bucket, key, newMime string) string {
	return fmt.Sprintf("/chgm/%s/mime/%s", EncodedEntry(bucket, key),
		base64.URLEncoding.EncodeToString([]byte(newMime)))
}

// URIChangeType builds a chtype command.
func URIChangeType(bucket, key string, fileType int) string {
	return fmt.Sprintf("/chtype/%s/type/%d", EncodedEntry(bucket, key), fileType)
}

// Unexported command builders; these cannot be used inside a Batch request.

// uriFetch builds a fetch command.
func uriFetch(resURL, bucket, key string) string {
	return fmt.Sprintf("/fetch/%s/to/%s",
		base64.URLEncoding.EncodeToString([]byte(resURL)), EncodedEntry(bucket, key))
}

// uriFetchWithoutKey builds a fetch command without an explicit key.
func uriFetchWithoutKey(resURL, bucket string) string {
	return fmt.Sprintf("/fetch/%s/to/%s",
		base64.URLEncoding.EncodeToString([]byte(resURL)), EncodedEntryWithoutKey(bucket))
}

// uriPrefetch builds a prefetch command.
func uriPrefetch(bucket, key string) string {
	return fmt.Sprintf("/prefetch/%s", EncodedEntry(bucket, key))
}

// uriSetImage builds an image (mirror origin) command.
func uriSetImage(siteURL, bucket string) string {
	return fmt.Sprintf("/image/%s/from/%s", bucket,
		base64.URLEncoding.EncodeToString([]byte(siteURL)))
}

// uriSetImageWithHost builds an image command carrying an origin Host.
func uriSetImageWithHost(siteURL, bucket, host string) string {
	return fmt.Sprintf("/image/%s/from/%s/host/%s", bucket,
		base64.URLEncoding.EncodeToString([]byte(siteURL)),
		base64.URLEncoding.EncodeToString([]byte(host)))
}

// uriUnsetImage builds an unimage command.
func uriUnsetImage(bucket string) string {
	return fmt.Sprintf("/unimage/%s", bucket)
}
// uriListFiles builds the query path of the list API from the listing
// options; empty options are omitted from the query string.
func uriListFiles(bucket, prefix, delimiter, marker string, limit int) string {
	query := url.Values{"bucket": {bucket}}
	if prefix != "" {
		query.Set("prefix", prefix)
	}
	if delimiter != "" {
		query.Set("delimiter", delimiter)
	}
	if marker != "" {
		query.Set("marker", marker)
	}
	if limit > 0 {
		query.Set("limit", strconv.Itoa(limit))
	}
	return "/list?" + query.Encode()
}
// EncodedEntry returns the URL-safe base64 encoding of "<bucket>:<key>".
func EncodedEntry(bucket, key string) string {
	return base64.URLEncoding.EncodeToString([]byte(bucket + ":" + key))
}

// EncodedEntryWithoutKey returns the URL-safe base64 encoding of an entry
// that carries no key (only the bucket name).
func EncodedEntryWithoutKey(bucket string) string {
	return base64.URLEncoding.EncodeToString([]byte(bucket))
}
// MakePublicURL builds the download URL of a resource in a public bucket.
func MakePublicURL(domain, key string) (finalUrl string) {
	base := strings.TrimRight(domain, "/")
	// parse error intentionally ignored, matching the historical behavior
	srcUri, _ := url.Parse(base + "/" + key)
	finalUrl = srcUri.String()
	return
}
// MakePrivateURL builds the download URL of a resource in a private bucket;
// deadline is the expiry time the "e" parameter carries.
func MakePrivateURL(mac *qbox.Mac, domain, key string, deadline int64) (privateURL string) {
	urlToSign := MakePublicURL(domain, key)

	sep := "?"
	if strings.Contains(urlToSign, "?") {
		sep = "&"
	}
	urlToSign = fmt.Sprintf("%s%se=%d", urlToSign, sep, deadline)

	token := mac.Sign([]byte(urlToSign))
	privateURL = urlToSign + "&token=" + token
	return
}

8
vendor/github.com/qiniu/api.v7/storage/config.go generated vendored Normal file
View File

@@ -0,0 +1,8 @@
package storage
// Config holds the settings for uploads, resource management, etc.
type Config struct {
	Zone          *Zone // the zone (region) the bucket lives in
	UseHTTPS      bool  // whether to use https domains
	UseCdnDomains bool  // whether to use CDN-accelerated upload domains
}

10
vendor/github.com/qiniu/api.v7/storage/doc.go generated vendored Normal file
View File

@@ -0,0 +1,10 @@
// Package storage provides resource upload, management and data processing.
// Uploads can be performed via form upload or via chunked upload, and the
// chunked upload also supports resuming from a breakpoint.
//
// BucketManager performs resource management such as querying file info and
// copying, deleting or renaming files, plus advanced features like changing
// a file's MIME type, lifecycle and storage class.
//
// FormUploader and ResumeUploader implement form upload and chunked
// (resumable) upload respectively; for larger files (e.g. above 100MB) the
// chunked upload is generally recommended for efficiency and reliability.
//
// For data processing, OperationManager sends persistent data processing
// requests and queries their status.
package storage

346
vendor/github.com/qiniu/api.v7/storage/form_upload.go generated vendored Normal file
View File

@@ -0,0 +1,346 @@
package storage
import (
"bytes"
"context"
"fmt"
"hash"
"hash/crc32"
"io"
"mime/multipart"
"net/http"
"net/textproto"
"os"
"path"
"path/filepath"
"strings"
)
// PutExtra carries the optional settings of a form upload.
type PutExtra struct {
	// Optional. Custom user parameters; keys must start with "x:",
	// any other key is ignored.
	Params map[string]string

	// Optional. When empty, the server detects the MIME type automatically.
	MimeType string

	// Upload progress notification. This callback should return as quickly
	// as possible.
	OnProgress func(fsize, uploaded int64)
}

// PutRet is the standard Qiniu upload response. If the upload token
// customizes the callback or returnBody, define a matching response struct
// instead.
type PutRet struct {
	Hash         string `json:"hash"`
	PersistentID string `json:"persistentId"`
	Key          string `json:"key"`
}
// FormUploader uploads files via the multipart form API.
type FormUploader struct {
	client *Client
	cfg    *Config
}

// NewFormUploader builds a FormUploader that uses the default client.
func NewFormUploader(cfg *Config) *FormUploader {
	return NewFormUploaderEx(cfg, nil)
}

// NewFormUploaderEx builds a FormUploader with an explicit client; a nil
// cfg or client falls back to the package defaults.
func NewFormUploaderEx(cfg *Config, client *Client) *FormUploader {
	if cfg == nil {
		cfg = &Config{}
	}
	if client == nil {
		client = &DefaultClient
	}
	return &FormUploader{
		client: client,
		cfg:    cfg,
	}
}
// PutFile uploads a local file via the form API. It differs from Put only
// in that the content comes from a file path rather than an io.Reader.
//
// ctx is the request context.
// ret receives the response data; unless the uptoken customizes callbackUrl
// or returnBody, its structure is PutRet.
// uptoken is the upload token issued by the business server.
// key is the access path of the file, e.g. "foo/bar.jpg"; keys should not
// start with '/', and an empty key is legal.
// localFile is the local path of the file to upload.
// extra holds optional settings and may be nil; see PutExtra.
func (p *FormUploader) PutFile(
	ctx context.Context, ret interface{}, uptoken, key, localFile string, extra *PutExtra) (err error) {
	return p.putFile(ctx, ret, uptoken, key, true, localFile, extra)
}

// PutFileWithoutKey uploads a local file without specifying a key. The
// stored name follows the token's saveKey rule when one is set, otherwise
// the file's content hash is used as the key. It differs from Put only in
// that the content comes from a file path rather than an io.Reader.
func (p *FormUploader) PutFileWithoutKey(
	ctx context.Context, ret interface{}, uptoken, localFile string, extra *PutExtra) (err error) {
	return p.putFile(ctx, ret, uptoken, "", false, localFile, extra)
}
// putFile opens localFile, determines its size and delegates to put, using
// the file's base name as the multipart file name.
func (p *FormUploader) putFile(
	ctx context.Context, ret interface{}, uptoken string,
	key string, hasKey bool, localFile string, extra *PutExtra) (err error) {
	src, openErr := os.Open(localFile)
	if openErr != nil {
		return openErr
	}
	defer src.Close()

	info, statErr := src.Stat()
	if statErr != nil {
		return statErr
	}

	if extra == nil {
		extra = &PutExtra{}
	}
	return p.put(ctx, ret, uptoken, key, hasKey, src, info.Size(), extra, filepath.Base(localFile))
}
// Put uploads content supplied by an io.Reader via the form API.
//
// ctx is the request context.
// ret receives the response data; unless the uptoken customizes callbackUrl
// or returnBody, its structure is PutRet.
// uptoken is the upload token issued by the business server.
// key is the access path of the file, e.g. "foo/bar.jpg"; keys should not
// start with '/', and an empty key is legal.
// data supplies the file content.
// size is the number of bytes to upload.
// extra holds optional settings and may be nil; see PutExtra.
func (p *FormUploader) Put(
	ctx context.Context, ret interface{}, uptoken, key string, data io.Reader, size int64, extra *PutExtra) (err error) {
	err = p.put(ctx, ret, uptoken, key, true, data, size, extra, path.Base(key))
	return
}

// PutWithoutKey uploads content supplied by an io.Reader without specifying
// a key. The stored name follows the token's saveKey rule when one is set,
// otherwise the file's content hash is used as the key.
func (p *FormUploader) PutWithoutKey(
	ctx context.Context, ret interface{}, uptoken string, data io.Reader, size int64, extra *PutExtra) (err error) {
	err = p.put(ctx, ret, uptoken, "", false, data, size, extra, "filename")
	return err
}
// put streams a multipart upload: the buffered form fields, then the file
// part, then a trailing crc32 part whose value is computed while the data
// flows through a TeeReader, then the closing boundary. The statement order
// matters: the crc32 part must only be rendered after the data part has
// been fully read.
func (p *FormUploader) put(
	ctx context.Context, ret interface{}, uptoken string,
	key string, hasKey bool, data io.Reader, size int64, extra *PutExtra, fileName string) (err error) {
	ak, bucket, gErr := getAkBucketFromUploadToken(uptoken)
	if gErr != nil {
		err = gErr
		return
	}

	var upHost string
	upHost, err = p.upHost(ak, bucket)
	if err != nil {
		return
	}

	var b bytes.Buffer
	writer := multipart.NewWriter(&b)

	if extra == nil {
		extra = &PutExtra{}
	}

	if extra.OnProgress != nil {
		data = &readerWithProgress{reader: data, fsize: size, onProgress: extra.OnProgress}
	}

	// write the token/key/custom form fields into the buffered head
	err = writeMultipart(writer, uptoken, key, hasKey, extra, fileName)
	if err != nil {
		return
	}

	var dataReader io.Reader

	// tee the data through a CRC32 so the trailing part can carry its checksum
	h := crc32.NewIEEE()
	dataReader = io.TeeReader(data, h)
	crcReader := newCrc32Reader(writer.Boundary(), h)

	// write the file part header (the content itself is streamed later)
	head := make(textproto.MIMEHeader)
	head.Set("Content-Disposition", fmt.Sprintf(`form-data; name="file"; filename="%s"`,
		escapeQuotes(fileName)))
	if extra.MimeType != "" {
		head.Set("Content-Type", extra.MimeType)
	}

	_, err = writer.CreatePart(head)
	if err != nil {
		return
	}

	// closing boundary, written manually because the crc32 part follows the file
	lastLine := fmt.Sprintf("\r\n--%s--\r\n", writer.Boundary())
	r := strings.NewReader(lastLine)

	// a negative size keeps the body length unknown (-1)
	bodyLen := int64(-1)
	if size >= 0 {
		bodyLen = int64(b.Len()) + size + int64(len(lastLine))
		bodyLen += crcReader.length()
	}

	// buffered head + file data + crc32 part + closing boundary
	mr := io.MultiReader(&b, dataReader, crcReader, r)

	contentType := writer.FormDataContentType()
	headers := http.Header{}
	headers.Add("Content-Type", contentType)
	err = p.client.CallWith64(ctx, ret, "POST", upHost, headers, mr, bodyLen)
	if err != nil {
		return
	}
	if extra.OnProgress != nil {
		extra.OnProgress(size, size)
	}
	return
}
type crc32Reader struct {
h hash.Hash32
boundary string
r io.Reader
flag bool
nlDashBoundaryNl string
header string
crc32PadLen int64
}
func newCrc32Reader(boundary string, h hash.Hash32) *crc32Reader {
nlDashBoundaryNl := fmt.Sprintf("\r\n--%s\r\n", boundary)
header := `Content-Disposition: form-data; name="crc32"` + "\r\n\r\n"
return &crc32Reader{
h: h,
boundary: boundary,
nlDashBoundaryNl: nlDashBoundaryNl,
header: header,
crc32PadLen: 10,
}
}
func (r *crc32Reader) Read(p []byte) (int, error) {
if r.flag == false {
crc32Sum := r.h.Sum32()
crc32Line := r.nlDashBoundaryNl + r.header + fmt.Sprintf("%010d", crc32Sum) //padding crc32 results to 10 digits
r.r = strings.NewReader(crc32Line)
r.flag = true
}
return r.r.Read(p)
}
func (r crc32Reader) length() (length int64) {
return int64(len(r.nlDashBoundaryNl+r.header)) + r.crc32PadLen
}
// upHost resolves the scheme and host that uploads should be sent to,
// honoring the configured zone, HTTPS and CDN preferences.
func (p *FormUploader) upHost(ak, bucket string) (upHost string, err error) {
	zone := p.cfg.Zone
	if zone == nil {
		z, zoneErr := GetZone(ak, bucket)
		if zoneErr != nil {
			err = zoneErr
			return
		}
		zone = z
	}

	scheme := "http://"
	if p.cfg.UseHTTPS {
		scheme = "https://"
	}

	host := zone.SrcUpHosts[0]
	if p.cfg.UseCdnDomains {
		host = zone.CdnUpHosts[0]
	}

	upHost = scheme + host
	return
}
type readerWithProgress struct {
reader io.Reader
uploaded int64
fsize int64
onProgress func(fsize, uploaded int64)
}
func (p *readerWithProgress) Read(b []byte) (n int, err error) {
if p.uploaded > 0 {
p.onProgress(p.fsize, p.uploaded)
}
n, err = p.reader.Read(b)
p.uploaded += int64(n)
return
}
// writeMultipart emits the upload token, the optional key and the
// user-defined "x:"-prefixed params as form fields, in that order.
// fileName is accepted for signature compatibility but not written here.
func writeMultipart(writer *multipart.Writer, uptoken, key string, hasKey bool,
	extra *PutExtra, fileName string) (err error) {
	// the upload token is mandatory and always comes first
	if err = writer.WriteField("token", uptoken); err != nil {
		return
	}
	// optional explicit object key
	if hasKey {
		if err = writer.WriteField("key", key); err != nil {
			return
		}
	}
	// user params: only "x:"-prefixed keys with non-empty values are sent
	// (ranging over a nil map is a no-op, so no nil check is needed)
	for k, v := range extra.Params {
		if !strings.HasPrefix(k, "x:") || v == "" {
			continue
		}
		if err = writer.WriteField(k, v); err != nil {
			return
		}
	}
	return
}
// quoteEscaper backslash-escapes the two characters that are special inside
// a double-quoted multipart header parameter.
var quoteEscaper = strings.NewReplacer(`\`, `\\`, `"`, `\"`)

// escapeQuotes prepares s for embedding in a quoted Content-Disposition
// parameter value.
func escapeQuotes(s string) string {
	return quoteEscaper.Replace(s)
}

211
vendor/github.com/qiniu/api.v7/storage/pfop.go generated vendored Normal file
View File

@@ -0,0 +1,211 @@
package storage
import (
"context"
"fmt"
"github.com/qiniu/api.v7/auth/qbox"
"github.com/qiniu/api.v7/conf"
"net/http"
)
// OperationManager performs persistent data-processing calls (pfop) and
// their status queries (prefop).
type OperationManager struct {
	client *Client // HTTP client used for all API calls
	mac *qbox.Mac // credentials used to sign management requests
	cfg *Config // zone and scheme options
}
// NewOperationManager builds a data-processing manager backed by the
// package default HTTP client; a nil cfg falls back to a zero Config.
func NewOperationManager(mac *qbox.Mac, cfg *Config) *OperationManager {
	return NewOperationManagerEx(mac, cfg, nil)
}
// NewOperationManagerEx builds a data-processing manager with an explicit
// HTTP client; nil cfg or client fall back to the package defaults.
func NewOperationManagerEx(mac *qbox.Mac, cfg *Config, client *Client) *OperationManager {
	if cfg == nil {
		cfg = &Config{}
	}
	if client == nil {
		client = &DefaultClient
	}
	return &OperationManager{
		client: client,
		mac: mac,
		cfg: cfg,
	}
}
// PfopRet is the reply of a pfop request: the persistent id used to poll
// the processing status via Prefop.
type PfopRet struct {
	PersistentID string `json:"persistentId,omitempty"`
}
// PrefopRet is the reply of a processing-status query (prefop).
type PrefopRet struct {
	ID          string `json:"id"`
	Code        int    `json:"code"`
	Desc        string `json:"desc"`
	InputBucket string `json:"inputBucket,omitempty"`
	InputKey    string `json:"inputKey,omitempty"`
	Pipeline    string `json:"pipeline,omitempty"`
	Reqid       string `json:"reqid,omitempty"`
	Items       []FopResult
}

// String renders the reply as a human-readable, multi-line report: the
// header fields first, then one indented section per processed operation.
func (r *PrefopRet) String() string {
	strData := fmt.Sprintf("Id: %s\r\nCode: %d\r\nDesc: %s\r\n", r.ID, r.Code, r.Desc)
	// optional header fields (fix: collapsed the redundant
	// fmt.Sprintln(fmt.Sprintf(...)) double-formatting into one Sprintf)
	if r.InputBucket != "" {
		strData += fmt.Sprintf("InputBucket: %s\n", r.InputBucket)
	}
	if r.InputKey != "" {
		strData += fmt.Sprintf("InputKey: %s\n", r.InputKey)
	}
	if r.Pipeline != "" {
		strData += fmt.Sprintf("Pipeline: %s\n", r.Pipeline)
	}
	if r.Reqid != "" {
		strData += fmt.Sprintf("Reqid: %s\n", r.Reqid)
	}
	strData += "\n" // blank line between the header and the item list
	for _, item := range r.Items {
		strData += fmt.Sprintf("\tCmd:\t%s\r\n\tCode:\t%d\r\n\tDesc:\t%s\r\n", item.Cmd, item.Code, item.Desc)
		if item.Error != "" {
			strData += fmt.Sprintf("\tError:\t%s\r\n", item.Error)
		} else {
			if item.Hash != "" {
				strData += fmt.Sprintf("\tHash:\t%s\r\n", item.Hash)
			}
			if item.Key != "" {
				strData += fmt.Sprintf("\tKey:\t%s\r\n", item.Key)
			}
			// fix: the nil check was redundant — len(nil slice) is 0
			if len(item.Keys) > 0 {
				strData += "\tKeys: {\r\n"
				for _, key := range item.Keys {
					strData += fmt.Sprintf("\t\t%s\r\n", key)
				}
				strData += "\t}\r\n"
			}
		}
		strData += "\r\n"
	}
	return strData
}

// FopResult describes the status of one cloud-processing operation inside
// a PrefopRet.
type FopResult struct {
	Cmd   string   `json:"cmd"`
	Code  int      `json:"code"`
	Desc  string   `json:"desc"`
	Error string   `json:"error,omitempty"`
	Hash  string   `json:"hash,omitempty"`
	Key   string   `json:"key,omitempty"`
	Keys  []string `json:"keys,omitempty"`
}
// Pfop submits a persistent data-processing request.
//
//	bucket    resource space of the source object
//	key       source object name
//	fops      list of processing commands
//	pipeline  dedicated multimedia queue name (optional)
//	notifyURL URL receiving the processing result (optional)
//	force     force re-processing even if results exist
//
// It returns the persistent id that can later be passed to Prefop.
func (m *OperationManager) Pfop(bucket, key, fops, pipeline, notifyURL string,
	force bool) (persistentID string, err error) {
	pfopParams := map[string][]string{
		"bucket": {bucket},
		"key":    {key},
		"fops":   {fops},
	}
	if pipeline != "" {
		pfopParams["pipeline"] = []string{pipeline}
	}
	if notifyURL != "" {
		pfopParams["notifyURL"] = []string{notifyURL}
	}
	if force {
		pfopParams["force"] = []string{"1"}
	}

	reqHost, err := m.apiHost(bucket)
	if err != nil {
		return
	}
	reqURL := fmt.Sprintf("%s/pfop/", reqHost)

	// the rpc layer reads the "mac" context key to sign the request
	ctx := context.WithValue(context.TODO(), "mac", m.mac)
	headers := http.Header{}
	headers.Add("Content-Type", conf.CONTENT_TYPE_FORM)

	var ret PfopRet
	if err = m.client.CallWithForm(ctx, &ret, "POST", reqURL, headers, pfopParams); err != nil {
		return
	}
	return ret.PersistentID, nil
}
// Prefop queries the status of a persistent processing task by its id.
func (m *OperationManager) Prefop(persistentID string) (ret PrefopRet, err error) {
	reqURL := fmt.Sprintf("%s/status/get/prefop?id=%s", m.prefopApiHost(persistentID), persistentID)
	headers := http.Header{}
	headers.Add("Content-Type", conf.CONTENT_TYPE_FORM)
	err = m.client.Call(context.TODO(), &ret, "GET", reqURL, headers)
	return
}
// apiHost resolves the API endpoint for the given bucket, querying the zone
// service when no zone is configured.
func (m *OperationManager) apiHost(bucket string) (apiHost string, err error) {
	zone := m.cfg.Zone
	if zone == nil {
		if zone, err = GetZone(m.mac.AccessKey, bucket); err != nil {
			return
		}
	}
	scheme := "http://"
	if m.cfg.UseHTTPS {
		scheme = "https://"
	}
	return scheme + zone.ApiHost, nil
}
// prefopApiHost picks the API host used for prefop status queries: the
// configured zone's host when set, otherwise the global default.
// persistentID is accepted for signature compatibility but unused.
func (m *OperationManager) prefopApiHost(persistentID string) (apiHost string) {
	host := "api.qiniu.com"
	if m.cfg.Zone != nil {
		host = m.cfg.Zone.ApiHost
	}
	scheme := "http://"
	if m.cfg.UseHTTPS {
		scheme = "https://"
	}
	return scheme + host
}

184
vendor/github.com/qiniu/api.v7/storage/resume_base.go generated vendored Normal file
View File

@@ -0,0 +1,184 @@
package storage
import (
"context"
"encoding/base64"
"fmt"
"hash/crc32"
"io"
"net/http"
"strconv"
"github.com/qiniu/api.v7/conf"
"github.com/qiniu/x/bytes.v7"
"github.com/qiniu/x/xlog.v7"
)
// ResumeUploader performs chunked (resumable) uploads.
type ResumeUploader struct {
	client *Client // HTTP client used for all calls
	cfg *Config // zone/HTTPS/CDN options
}
// NewResumeUploader creates a chunked uploader backed by the package
// default HTTP client; a nil cfg falls back to a zero Config.
func NewResumeUploader(cfg *Config) *ResumeUploader {
	return NewResumeUploaderEx(cfg, nil)
}
// NewResumeUploaderEx creates a chunked uploader with an explicit HTTP
// client; nil arguments fall back to a zero Config and the default client.
func NewResumeUploaderEx(cfg *Config, client *Client) *ResumeUploader {
	if cfg == nil {
		cfg = new(Config)
	}
	if client == nil {
		client = &DefaultClient
	}
	return &ResumeUploader{cfg: cfg, client: client}
}
// Mkblk creates a new block of blockSize bytes on the up server, uploading
// the first chunk (size bytes from body) in the same call.
func (p *ResumeUploader) Mkblk(
	ctx context.Context, upToken string, upHost string, ret *BlkputRet, blockSize int, body io.Reader, size int) error {
	headers := http.Header{}
	headers.Add("Content-Type", conf.CONTENT_TYPE_OCTET)
	headers.Add("Authorization", "UpToken "+upToken)
	reqUrl := fmt.Sprintf("%s/mkblk/%d", upHost, blockSize)
	return p.client.CallWith(ctx, ret, "POST", reqUrl, headers, body, size)
}
// Bput uploads the next chunk (size bytes from body) of an existing block,
// continuing at ret.Offset on the host the server nominated in ret.Host.
func (p *ResumeUploader) Bput(
	ctx context.Context, upToken string, ret *BlkputRet, body io.Reader, size int) error {
	headers := http.Header{}
	headers.Add("Content-Type", conf.CONTENT_TYPE_OCTET)
	headers.Add("Authorization", "UpToken "+upToken)
	reqUrl := fmt.Sprintf("%s/bput/%s/%d", ret.Host, ret.Ctx, ret.Offset)
	return p.client.CallWith(ctx, ret, "POST", reqUrl, headers, body, size)
}
// resumableBput uploads one whole block (index blkIdx, blkSize bytes) chunk
// by chunk, resuming from ret.Ctx/ret.Offset when present and verifying the
// server-echoed CRC-32 of every chunk against a locally computed one.
func (p *ResumeUploader) resumableBput(
	ctx context.Context, upToken string, upHost string, ret *BlkputRet, f io.ReaderAt, blkIdx, blkSize int, extra *RputExtra) (err error) {
	log := xlog.NewWith(ctx)
	h := crc32.NewIEEE()
	// byte offset of this block inside the file
	offbase := int64(blkIdx) << blockBits
	chunkSize := extra.ChunkSize
	var bodyLength int
	if ret.Ctx == "" {
		// fresh block: mkblk carries the first chunk
		if chunkSize < blkSize {
			bodyLength = chunkSize
		} else {
			bodyLength = blkSize
		}
		body1 := io.NewSectionReader(f, offbase, int64(bodyLength))
		body := io.TeeReader(body1, h) // hash while uploading
		err = p.Mkblk(ctx, upToken, upHost, ret, blkSize, body, bodyLength)
		if err != nil {
			return
		}
		// the server must echo our checksum and the exact bytes written
		if ret.Crc32 != h.Sum32() || int(ret.Offset) != bodyLength {
			err = ErrUnmatchedChecksum
			return
		}
		extra.Notify(blkIdx, blkSize, ret)
	}
	// remaining chunks go through bput until the block is complete
	for int(ret.Offset) < blkSize {
		if chunkSize < blkSize-int(ret.Offset) {
			bodyLength = chunkSize
		} else {
			bodyLength = blkSize - int(ret.Offset)
		}
		tryTimes := extra.TryTimes
	lzRetry:
		h.Reset()
		body1 := io.NewSectionReader(f, offbase+int64(ret.Offset), int64(bodyLength))
		body := io.TeeReader(body1, h)
		err = p.Bput(ctx, upToken, ret, body, bodyLength)
		if err == nil {
			if ret.Crc32 == h.Sum32() {
				extra.Notify(blkIdx, blkSize, ret)
				continue
			}
			log.Warn("ResumableBlockput: invalid checksum, retry")
			err = ErrUnmatchedChecksum
		} else {
			if ei, ok := err.(*ErrorInfo); ok && ei.Code == InvalidCtx {
				// the server no longer knows this ctx: clear it so the
				// caller restarts the whole block
				ret.Ctx = "" // reset
				log.Warn("ResumableBlockput: invalid ctx, please retry")
				return
			}
			log.Warn("ResumableBlockput: bput failed -", err)
		}
		// bounded retry of the same chunk
		if tryTimes > 1 {
			tryTimes--
			log.Info("ResumableBlockput retrying ...")
			goto lzRetry
		}
		break
	}
	return
}
// Mkfile stitches the previously uploaded blocks into the final object.
// The URL encodes fsize, mime type, key and user params; the request body
// is the comma-joined list of per-block ctx values.
func (p *ResumeUploader) Mkfile(
	ctx context.Context, upToken string, upHost string, ret interface{}, key string, hasKey bool, fsize int64, extra *RputExtra) (err error) {
	url := upHost + "/mkfile/" + strconv.FormatInt(fsize, 10)
	if extra.MimeType != "" {
		url += "/mimeType/" + encode(extra.MimeType)
	}
	if hasKey {
		url += "/key/" + encode(key)
	}
	for k, v := range extra.Params {
		url += fmt.Sprintf("/%s/%s", k, encode(v))
	}

	// body: ctx1,ctx2,...,ctxN
	buf := make([]byte, 0, 196*len(extra.Progresses))
	for i, prog := range extra.Progresses {
		if i > 0 {
			buf = append(buf, ',')
		}
		buf = append(buf, prog.Ctx...)
	}

	headers := http.Header{}
	headers.Add("Content-Type", conf.CONTENT_TYPE_OCTET)
	headers.Add("Authorization", "UpToken "+upToken)
	return p.client.CallWith(
		ctx, ret, "POST", url, headers, bytes.NewReader(buf), len(buf))
}
// encode returns the URL-safe base64 form of s, as required by the mkfile
// URL path segments.
func encode(s string) string {
	return base64.URLEncoding.EncodeToString([]byte(s))
}

310
vendor/github.com/qiniu/api.v7/storage/resume_upload.go generated vendored Normal file
View File

@@ -0,0 +1,310 @@
package storage
import (
"context"
"errors"
"fmt"
"io"
"os"
"sync"
"github.com/qiniu/x/xlog.v7"
)
// Errors that a chunked (resumable) upload may return.
var (
	ErrInvalidPutProgress = errors.New("invalid put progress")
	ErrPutFailed = errors.New("resumable put failed")
	ErrUnmatchedChecksum = errors.New("unmatched checksum")
	ErrBadToken = errors.New("invalid token")
)

// Server error code signalling that the upload context has expired.
const (
	InvalidCtx = 701 // UP: invalid ctx (bput) — malformed, or evicted after long inactivity
)

// Defaults for chunked uploads.
const (
	defaultWorkers = 4 // default number of blocks uploaded concurrently
	defaultChunkSize = 4 * 1024 * 1024 // default chunk size, 4MB
	defaultTryTimes = 3 // default retry count for a failed bput
)

// Settings controls the chunked-upload worker pool and retry behavior.
type Settings struct {
	TaskQsize int // optional; task queue size, 0 means Workers * 4
	Workers int // number of concurrent upload goroutines
	ChunkSize int // chunk size; 4MB when unset
	TryTimes int // retry count; 3 when unset
}

// Package-wide chunked-upload settings (overridable via SetSettings).
var settings = Settings{
	TaskQsize: defaultWorkers * 4,
	Workers: defaultWorkers,
	ChunkSize: defaultChunkSize,
	TryTimes: defaultTryTimes,
}
// SetSettings overrides the package-wide chunked-upload settings; zero
// fields are replaced by the package defaults.
func SetSettings(v *Settings) {
	settings = Settings{
		Workers:   orDefault(v.Workers, defaultWorkers),
		ChunkSize: orDefault(v.ChunkSize, defaultChunkSize),
		TryTimes:  orDefault(v.TryTimes, defaultTryTimes),
	}
	// the queue default derives from the (possibly defaulted) worker count
	settings.TaskQsize = orDefault(v.TaskQsize, settings.Workers*4)
}

// orDefault returns val unless it is zero, in which case def is used.
func orDefault(val, def int) int {
	if val == 0 {
		return def
	}
	return val
}
// tasks is the shared, buffered queue feeding the upload worker pool.
var tasks chan func()

// worker drains the task queue forever; workers live for the remainder of
// the process once started (there is no shutdown path).
func worker(tasks chan func()) {
	for {
		task := <-tasks
		task()
	}
}

// initWorkers creates the queue and the worker goroutines; invoked exactly
// once via sync.Once from rput.
func initWorkers() {
	tasks = make(chan func(), settings.TaskQsize)
	for i := 0; i < settings.Workers; i++ {
		go worker(tasks)
	}
}
// notifyNil is the no-op default for the per-block progress callback.
func notifyNil(blkIdx int, blkSize int, ret *BlkputRet) {}
// notifyErrNil is the no-op default for the per-block error callback.
func notifyErrNil(blkIdx int, blkSize int, err error) {}
const (
	blockBits = 22                    // blocks are 1<<22 = 4MB
	blockMask = (1 << blockBits) - 1 // offset of a byte within its block
)

// BlockCount returns how many 4MB blocks are needed to hold fsize bytes
// (ceiling division by 1<<blockBits).
func BlockCount(fsize int64) int {
	if fsize == 0 {
		return 0
	}
	return int((fsize-1)>>blockBits) + 1
}
// BlkputRet is the reply of mkblk/bput: the server-side state of one block.
type BlkputRet struct {
	Ctx string `json:"ctx"`
	Checksum string `json:"checksum"`
	Crc32 uint32 `json:"crc32"`
	Offset uint32 `json:"offset"`
	Host string `json:"host"`
	ExpiredAt int64 `json:"expired_at"`
}

// RputExtra carries the optional parameters of a chunked upload.
type RputExtra struct {
	Params map[string]string // optional; user params — keys must start with "x:" and values be non-empty, otherwise ignored
	MimeType string // optional
	ChunkSize int // optional; chunk size per bput
	TryTimes int // optional; retry count
	Progresses []BlkputRet // optional; per-block progress, enables resuming
	Notify func(blkIdx int, blkSize int, ret *BlkputRet) // optional; progress callback — note blocks upload in parallel
	NotifyErr func(blkIdx int, blkSize int, err error)
}

// once guards the lazy start of the shared worker pool (see initWorkers).
var once sync.Once
// Put uploads a file with chunked, resumable transfer.
//
// ctx is the request context. ret receives the reply (a PutRet unless the
// upToken sets CallbackUrl/ReturnBody). upToken is the server-issued upload
// credential. key is the object path (need not start with '/'; empty is
// legal). f must be an io.ReaderAt so blocks can be re-read on retry.
// fsize is the total size and extra holds the optional settings.
func (p *ResumeUploader) Put(ctx context.Context, ret interface{}, upToken string, key string, f io.ReaderAt,
	fsize int64, extra *RputExtra) (err error) {
	return p.rput(ctx, ret, upToken, key, true, f, fsize, extra)
}
// PutWithoutKey uploads a file with chunked, resumable transfer, letting
// the server name it: by the upToken's saveKey rule when set, otherwise by
// the content hash. Parameters are as in Put, minus the key.
func (p *ResumeUploader) PutWithoutKey(
	ctx context.Context, ret interface{}, upToken string, f io.ReaderAt, fsize int64, extra *RputExtra) (err error) {
	return p.rput(ctx, ret, upToken, "", false, f, fsize, extra)
}
// PutFile is Put for a local file path: it opens localFile and uploads its
// contents under key with chunked, resumable transfer.
func (p *ResumeUploader) PutFile(
	ctx context.Context, ret interface{}, upToken, key, localFile string, extra *RputExtra) (err error) {
	return p.rputFile(ctx, ret, upToken, key, true, localFile, extra)
}
// PutFileWithoutKey is PutWithoutKey for a local file path: the object is
// named by the upToken's saveKey rule when set, otherwise by content hash.
func (p *ResumeUploader) PutFileWithoutKey(
	ctx context.Context, ret interface{}, upToken, localFile string, extra *RputExtra) (err error) {
	err = p.rputFile(ctx, ret, upToken, "", false, localFile, extra)
	return
}
// rput drives the chunked upload: it cuts the input into 4MB blocks,
// uploads them concurrently on the shared worker pool, then issues mkfile
// to stitch the blocks into the final object. When extra.Progresses is
// preset it must hold exactly one entry per block (resume support).
func (p *ResumeUploader) rput(
	ctx context.Context, ret interface{}, upToken string,
	key string, hasKey bool, f io.ReaderAt, fsize int64, extra *RputExtra) (err error) {
	once.Do(initWorkers)

	log := xlog.NewWith(ctx)
	blockCnt := BlockCount(fsize)

	// normalize the optional settings
	if extra == nil {
		extra = new(RputExtra)
	}
	if extra.Progresses == nil {
		extra.Progresses = make([]BlkputRet, blockCnt)
	} else if len(extra.Progresses) != blockCnt {
		return ErrInvalidPutProgress
	}
	if extra.ChunkSize == 0 {
		extra.ChunkSize = settings.ChunkSize
	}
	if extra.TryTimes == 0 {
		extra.TryTimes = settings.TryTimes
	}
	if extra.Notify == nil {
		extra.Notify = notifyNil
	}
	if extra.NotifyErr == nil {
		extra.NotifyErr = notifyErrNil
	}

	// resolve the upload host from the token's access key and bucket
	ak, bucket, gErr := getAkBucketFromUploadToken(upToken)
	if gErr != nil {
		return gErr
	}
	upHost, gErr := p.upHost(ak, bucket)
	if gErr != nil {
		return gErr
	}

	var (
		wg     sync.WaitGroup
		failMu sync.Mutex // fix: nfails was racily incremented from concurrent tasks
		nfails int
	)
	wg.Add(blockCnt)
	last := blockCnt - 1
	blkSize := 1 << blockBits
	for i := 0; i < blockCnt; i++ {
		blkIdx := i
		blkSize1 := blkSize
		if i == last {
			// the final block holds whatever is left of the file
			offbase := int64(blkIdx) << blockBits
			blkSize1 = int(fsize - offbase)
		}
		task := func() {
			defer wg.Done()
			tryTimes := extra.TryTimes
		lzRetry:
			err := p.resumableBput(ctx, upToken, upHost, &extra.Progresses[blkIdx], f, blkIdx, blkSize1, extra)
			if err != nil {
				if tryTimes > 1 {
					tryTimes--
					log.Info("resumable.Put retrying ...", blkIdx, "reason:", err)
					goto lzRetry
				}
				log.Warn("resumable.Put", blkIdx, "failed:", err)
				extra.NotifyErr(blkIdx, blkSize1, err)
				failMu.Lock()
				nfails++
				failMu.Unlock()
			}
		}
		tasks <- task
	}
	wg.Wait()

	if nfails != 0 {
		return ErrPutFailed
	}
	return p.Mkfile(ctx, upToken, upHost, ret, key, hasKey, fsize, extra)
}
// rputFile opens localFile and delegates to rput with the file's size.
func (p *ResumeUploader) rputFile(
	ctx context.Context, ret interface{}, upToken string,
	key string, hasKey bool, localFile string, extra *RputExtra) (err error) {
	f, err := os.Open(localFile)
	if err != nil {
		return
	}
	defer f.Close()

	var fi os.FileInfo
	if fi, err = f.Stat(); err != nil {
		return
	}
	return p.rput(ctx, ret, upToken, key, hasKey, f, fi.Size(), extra)
}
// upHost resolves the upload entry host for ak/bucket, honoring the zone,
// HTTPS and CDN options from the uploader config.
func (p *ResumeUploader) upHost(ak, bucket string) (upHost string, err error) {
	zone := p.cfg.Zone
	if zone == nil {
		zone, err = GetZone(ak, bucket)
		if err != nil {
			return
		}
	}
	scheme := "http://"
	if p.cfg.UseHTTPS {
		scheme = "https://"
	}
	if p.cfg.UseCdnDomains {
		return fmt.Sprintf("%s%s", scheme, zone.CdnUpHosts[0]), nil
	}
	return fmt.Sprintf("%s%s", scheme, zone.SrcUpHosts[0]), nil
}

349
vendor/github.com/qiniu/api.v7/storage/rpc.go generated vendored Normal file
View File

@@ -0,0 +1,349 @@
package storage
// The original library rpc.v7 logic in github.com/qiniu/x has its own bugs
// under the concurrent http calls, we make a fork of the library and fix
// the bug
import (
"bytes"
"encoding/json"
"fmt"
"github.com/qiniu/api.v7/auth/qbox"
"github.com/qiniu/api.v7/conf"
"github.com/qiniu/x/reqid.v7"
. "golang.org/x/net/context"
"io"
"io/ioutil"
"net/http"
"net/url"
"runtime"
"strings"
)
// UserAgent is sent with every request; customize via SetAppName.
var UserAgent = "Golang qiniu/rpc package"
// DefaultClient is the shared client used when callers do not supply one.
var DefaultClient = Client{&http.Client{Transport: http.DefaultTransport}}
// --------------------------------------------------------------------
// Client wraps *http.Client with Qiniu request signing, request-id
// propagation and JSON reply decoding.
type Client struct {
	*http.Client
}
// SetAppName customizes the User-Agent string reported to the server.
// userApp should be [A-Za-z0-9_\ \-\.]* (not validated here); the returned
// error is always nil.
func SetAppName(userApp string) error {
	UserAgent = fmt.Sprintf(
		"QiniuGo/%s (%s; %s; %s) %s", conf.Version, runtime.GOOS, runtime.GOARCH, userApp, runtime.Version())
	return nil
}
// --------------------------------------------------------------------
// newRequest builds an *http.Request and, when the context carries a
// *qbox.Mac under the string key "mac" (a pre-context-package convention
// used by callers in this package, e.g. OperationManager.Pfop), signs the
// request with a QBox management token.
func newRequest(ctx Context, method, reqUrl string, headers http.Header, body io.Reader) (req *http.Request, err error) {
	req, err = http.NewRequest(method, reqUrl, body)
	if err != nil {
		return
	}
	if headers == nil {
		headers = http.Header{}
	}
	req.Header = headers
	//check access token
	mac, ok := ctx.Value("mac").(*qbox.Mac)
	if ok {
		token, signErr := mac.SignRequest(req)
		if signErr != nil {
			err = signErr
			return
		}
		req.Header.Add("Authorization", "QBox "+token)
	}
	return
}
// DoRequest performs a body-less (optionally signed) request.
func (r Client) DoRequest(ctx Context, method, reqUrl string, headers http.Header) (resp *http.Response, err error) {
	var req *http.Request
	if req, err = newRequest(ctx, method, reqUrl, headers, nil); err != nil {
		return
	}
	return r.Do(ctx, req)
}
// DoRequestWith performs a request carrying a body of known length.
func (r Client) DoRequestWith(ctx Context, method, reqUrl string, headers http.Header, body io.Reader,
	bodyLength int) (resp *http.Response, err error) {
	var req *http.Request
	if req, err = newRequest(ctx, method, reqUrl, headers, body); err != nil {
		return
	}
	req.ContentLength = int64(bodyLength)
	return r.Do(ctx, req)
}
// DoRequestWith64 is DoRequestWith for bodies whose length exceeds int.
func (r Client) DoRequestWith64(ctx Context, method, reqUrl string, headers http.Header, body io.Reader,
	bodyLength int64) (resp *http.Response, err error) {
	var req *http.Request
	if req, err = newRequest(ctx, method, reqUrl, headers, body); err != nil {
		return
	}
	req.ContentLength = bodyLength
	return r.Do(ctx, req)
}
// DoRequestWithForm sends data form-encoded: appended to the query string
// for GET/HEAD/DELETE, as the request body otherwise.
func (r Client) DoRequestWithForm(ctx Context, method, reqUrl string, headers http.Header,
	data map[string][]string) (resp *http.Response, err error) {
	if headers == nil {
		headers = http.Header{}
	}
	headers.Add("Content-Type", "application/x-www-form-urlencoded")
	requestData := url.Values(data).Encode()
	switch method {
	case "GET", "HEAD", "DELETE":
		sep := "?"
		if strings.ContainsRune(reqUrl, '?') {
			sep = "&"
		}
		return r.DoRequest(ctx, method, reqUrl+sep+requestData, headers)
	}
	return r.DoRequestWith(ctx, method, reqUrl, headers, strings.NewReader(requestData), len(requestData))
}
// DoRequestWithJson marshals data and sends it as a JSON request body.
func (r Client) DoRequestWithJson(ctx Context, method, reqUrl string, headers http.Header,
	data interface{}) (resp *http.Response, err error) {
	reqBody, mErr := json.Marshal(data)
	if mErr != nil {
		return nil, mErr
	}
	if headers == nil {
		headers = http.Header{}
	}
	headers.Add("Content-Type", "application/json")
	return r.DoRequestWith(ctx, method, reqUrl, headers, bytes.NewReader(reqBody), len(reqBody))
}
// Do sends req, propagating the request id from ctx and honoring ctx
// cancellation even on transports that predate http.Request.WithContext,
// via the legacy CancelRequest hook.
func (r Client) Do(ctx Context, req *http.Request) (resp *http.Response, err error) {
	if ctx == nil {
		ctx = Background()
	}
	if reqId, ok := reqid.FromContext(ctx); ok {
		req.Header.Set("X-Reqid", reqId)
	}
	if _, ok := req.Header["User-Agent"]; !ok {
		req.Header.Set("User-Agent", UserAgent)
	}
	transport := r.Transport // don't change r.Transport
	if transport == nil {
		transport = http.DefaultTransport
	}
	// avoid cancel() is called before Do(req), but isn't accurate
	select {
	case <-ctx.Done():
		err = ctx.Err()
		return
	default:
	}
	if tr, ok := getRequestCanceler(transport); ok {
		// support CancelRequest: run the call in a goroutine so it can be
		// cancelled when ctx fires, then wait for the goroutine to finish
		// so resp/err are never written concurrently with our return
		reqC := make(chan bool, 1)
		go func() {
			resp, err = r.Client.Do(req)
			reqC <- true
		}()
		select {
		case <-reqC:
		case <-ctx.Done():
			tr.CancelRequest(req)
			<-reqC // wait for the Do goroutine to observe the cancel
			err = ctx.Err()
		}
	} else {
		resp, err = r.Client.Do(req)
	}
	return
}
// --------------------------------------------------------------------
// ErrorInfo is the error payload returned by Qiniu services, combined with
// the HTTP status code and request id of the failing call.
type ErrorInfo struct {
	Err string `json:"error,omitempty"`
	Key string `json:"key,omitempty"`
	Reqid string `json:"reqid,omitempty"`
	Errno int `json:"errno,omitempty"`
	Code int `json:"code"`
}
// ErrorDetail renders the full error as JSON, for logging.
func (r *ErrorInfo) ErrorDetail() string {
	msg, _ := json.Marshal(r)
	return string(msg)
}
// Error implements the error interface with the server-provided message.
func (r *ErrorInfo) Error() string {
	return r.Err
}
// RpcError exposes all structured fields at once.
func (r *ErrorInfo) RpcError() (code, errno int, key, err string) {
	return r.Code, r.Errno, r.Key, r.Err
}
// HttpCode reports the HTTP status of the failing response.
func (r *ErrorInfo) HttpCode() int {
	return r.Code
}
// --------------------------------------------------------------------
// parseError fills e.Err (and, for Qiniu-style JSON bodies, Key and Errno)
// from the response body r; on read failure the read error becomes e.Err.
func parseError(e *ErrorInfo, r io.Reader) {
	body, readErr := ioutil.ReadAll(r)
	if readErr != nil {
		e.Err = readErr.Error()
		return
	}
	var qerr struct {
		Err   string `json:"error"`
		Key   string `json:"key"`
		Errno int    `json:"errno"`
	}
	if json.Unmarshal(body, &qerr) == nil && qerr.Err != "" {
		// Qiniu-style error payload
		e.Err = qerr.Err
		e.Key = qerr.Key
		e.Errno = qerr.Errno
		return
	}
	// otherwise surface the raw body text as the message
	e.Err = string(body)
}
// ResponseError converts an HTTP response into an *ErrorInfo, decoding the
// JSON error body for replies above 299 that carry one.
func ResponseError(resp *http.Response) (err error) {
	e := &ErrorInfo{
		Reqid: resp.Header.Get("X-Reqid"),
		Code:  resp.StatusCode,
	}
	if resp.StatusCode <= 299 || resp.ContentLength == 0 {
		return e
	}
	if ct, ok := resp.Header["Content-Type"]; ok && strings.HasPrefix(ct[0], "application/json") {
		parseError(e, resp.Body)
	}
	return e
}
// CallRet drains and closes resp.Body (so the keep-alive connection can be
// reused), decodes a 2xx JSON body into ret, and converts every other
// outcome into an *ErrorInfo via ResponseError.
//
// NOTE(review): a 2xx status other than 200 still falls through to
// ResponseError — these services appear to signal success strictly with
// 200; confirm before relying on other 2xx codes.
func CallRet(ctx Context, ret interface{}, resp *http.Response) (err error) {
	defer func() {
		// drain before close so the transport can reuse the connection
		io.Copy(ioutil.Discard, resp.Body)
		resp.Body.Close()
	}()
	if resp.StatusCode/100 == 2 {
		if ret != nil && resp.ContentLength != 0 {
			err = json.NewDecoder(resp.Body).Decode(ret)
			if err != nil {
				return
			}
		}
		if resp.StatusCode == 200 {
			return nil
		}
	}
	return ResponseError(resp)
}
// CallWithForm posts form-encoded params and decodes the JSON reply into ret.
func (r Client) CallWithForm(ctx Context, ret interface{}, method, reqUrl string, headers http.Header,
	param map[string][]string) (err error) {
	var resp *http.Response
	if resp, err = r.DoRequestWithForm(ctx, method, reqUrl, headers, param); err != nil {
		return
	}
	return CallRet(ctx, ret, resp)
}
// CallWithJson sends param as a JSON body and decodes the JSON reply into ret.
func (r Client) CallWithJson(ctx Context, ret interface{}, method, reqUrl string, headers http.Header,
	param interface{}) (err error) {
	var resp *http.Response
	if resp, err = r.DoRequestWithJson(ctx, method, reqUrl, headers, param); err != nil {
		return
	}
	return CallRet(ctx, ret, resp)
}
// CallWith sends a raw body of known length and decodes the JSON reply into ret.
func (r Client) CallWith(ctx Context, ret interface{}, method, reqUrl string, headers http.Header, body io.Reader,
	bodyLength int) (err error) {
	var resp *http.Response
	if resp, err = r.DoRequestWith(ctx, method, reqUrl, headers, body, bodyLength); err != nil {
		return
	}
	return CallRet(ctx, ret, resp)
}
// CallWith64 is CallWith for bodies whose length exceeds int.
func (r Client) CallWith64(ctx Context, ret interface{}, method, reqUrl string, headers http.Header, body io.Reader,
	bodyLength int64) (err error) {
	var resp *http.Response
	if resp, err = r.DoRequestWith64(ctx, method, reqUrl, headers, body, bodyLength); err != nil {
		return
	}
	return CallRet(ctx, ret, resp)
}
// Call issues a body-less request and decodes the JSON reply into ret.
func (r Client) Call(ctx Context, ret interface{}, method, reqUrl string, headers http.Header) (err error) {
	var resp *http.Response
	if resp, err = r.DoRequestWith(ctx, method, reqUrl, headers, nil, 0); err != nil {
		return
	}
	return CallRet(ctx, ret, resp)
}
// ---------------------------------------------------------------------------
// requestCanceler is the pre-context cancellation hook implemented by
// http.Transport.
type requestCanceler interface {
	CancelRequest(req *http.Request)
}
// nestedObjectGetter lets wrapping transports expose the transport they
// delegate to, so cancellation can tunnel through decorator layers.
type nestedObjectGetter interface {
	NestedObject() interface{}
}
// getRequestCanceler unwraps tp through any nestedObjectGetter layers until
// a transport supporting CancelRequest is found; ok reports success.
func getRequestCanceler(tp http.RoundTripper) (rc requestCanceler, ok bool) {
	if rc, ok = tp.(requestCanceler); ok {
		return
	}
	p := interface{}(tp)
	for {
		getter, ok1 := p.(nestedObjectGetter)
		if !ok1 {
			return
		}
		p = getter.NestedObject()
		if rc, ok = p.(requestCanceler); ok {
			return
		}
	}
}
// --------------------------------------------------------------------

73
vendor/github.com/qiniu/api.v7/storage/token.go generated vendored Normal file
View File

@@ -0,0 +1,73 @@
package storage
import (
"encoding/base64"
"encoding/json"
"errors"
"strings"
"time"
"github.com/qiniu/api.v7/auth/qbox"
)
// PutPolicy is the upload policy embedded in an upload token.
type PutPolicy struct {
	Scope string `json:"scope"`
	Expires uint32 `json:"deadline"` // deadline, in seconds (see UploadToken)
	IsPrefixalScope int `json:"isPrefixalScope,omitempty"`
	InsertOnly uint16 `json:"insertOnly,omitempty"` // if non-zero, insert only even when Scope is Bucket:Key
	DetectMime uint8 `json:"detectMime,omitempty"` // if non-zero, the server infers MimeType from content
	FsizeLimit int64 `json:"fsizeLimit,omitempty"`
	MimeLimit string `json:"mimeLimit,omitempty"`
	SaveKey string `json:"saveKey,omitempty"`
	CallbackFetchKey uint8 `json:"callbackFetchKey,omitempty"`
	CallbackURL string `json:"callbackUrl,omitempty"`
	CallbackHost string `json:"callbackHost,omitempty"`
	CallbackBody string `json:"callbackBody,omitempty"`
	CallbackBodyType string `json:"callbackBodyType,omitempty"`
	ReturnURL string `json:"returnUrl,omitempty"`
	ReturnBody string `json:"returnBody,omitempty"`
	PersistentOps string `json:"persistentOps,omitempty"`
	PersistentNotifyURL string `json:"persistentNotifyUrl,omitempty"`
	PersistentPipeline string `json:"persistentPipeline,omitempty"`
	EndUser string `json:"endUser,omitempty"`
	DeleteAfterDays int `json:"deleteAfterDays,omitempty"`
	FileType int `json:"fileType,omitempty"`
}
// UploadToken generates an upload credential from the policy, signed with
// mac. A zero Expires defaults to one hour.
//
// NOTE(review): this mutates p.Expires in place (relative TTL -> absolute
// deadline), so calling UploadToken twice on the same PutPolicy compounds
// the deadline — confirm callers always use a fresh policy value.
func (p *PutPolicy) UploadToken(mac *qbox.Mac) (token string) {
	if p.Expires == 0 {
		p.Expires = 3600 // 1 hour
	}
	p.Expires += uint32(time.Now().Unix())
	putPolicyJSON, _ := json.Marshal(p)
	token = mac.SignWithData(putPolicyJSON)
	return
}
// getAkBucketFromUploadToken extracts the access key and target bucket from
// an upload token of the form "<ak>:<sign>:<base64url(putPolicy)>".
func getAkBucketFromUploadToken(token string) (ak, bucket string, err error) {
	// exactly three colon-separated segments are required
	items := strings.Split(token, ":")
	if len(items) != 3 {
		err = errors.New("invalid upload token, format error")
		return
	}
	ak = items[0]

	policyBytes, dErr := base64.URLEncoding.DecodeString(items[2])
	if dErr != nil {
		err = errors.New("invalid upload token, invalid put policy")
		return
	}
	var putPolicy PutPolicy
	if json.Unmarshal(policyBytes, &putPolicy) != nil {
		err = errors.New("invalid upload token, invalid put policy")
		return
	}
	// Scope is "<bucket>" or "<bucket>:<key>"
	bucket = strings.Split(putPolicy.Scope, ":")[0]
	return
}

22
vendor/github.com/qiniu/api.v7/storage/util.go generated vendored Normal file
View File

@@ -0,0 +1,22 @@
package storage
import (
"time"
)
// ParsePutTime converts a Qiniu PutTime stamp (units of 100 nanoseconds
// since the Unix epoch) into a time.Time.
func ParsePutTime(putTime int64) (t time.Time) {
	return time.Unix(0, putTime*100)
}
// IsContextExpired reports whether the upload context in blkPut is within
// one day of its server-side expiry. The day of slack exists because
// resuming an interrupted upload may itself take up to a day.
func IsContextExpired(blkPut BlkputRet) bool {
	if blkPut.Ctx == "" {
		return false
	}
	deadline := time.Unix(blkPut.ExpiredAt, 0).AddDate(0, 0, -1)
	return time.Now().After(deadline)
}

247
vendor/github.com/qiniu/api.v7/storage/zone.go generated vendored Normal file
View File

@@ -0,0 +1,247 @@
package storage
import (
"context"
"fmt"
"strings"
"sync"
)
// Zone 为空间对应的机房属性,主要包括了上传,资源管理等操作的域名
type Zone struct {
SrcUpHosts []string
CdnUpHosts []string
RsHost string
RsfHost string
ApiHost string
IovipHost string
}
func (z *Zone) String() string {
str := ""
str += fmt.Sprintf("SrcUpHosts: %v\n", z.SrcUpHosts)
str += fmt.Sprintf("CdnUpHosts: %v\n", z.CdnUpHosts)
str += fmt.Sprintf("IovipHost: %s\n", z.IovipHost)
str += fmt.Sprintf("RsHost: %s\n", z.RsHost)
str += fmt.Sprintf("RsfHost: %s\n", z.RsfHost)
str += fmt.Sprintf("ApiHost: %s\n", z.ApiHost)
return str
}
func (z *Zone) GetRsfHost(useHttps bool) string {
scheme := "http://"
if useHttps {
scheme = "https://"
}
return fmt.Sprintf("%s%s", scheme, z.RsfHost)
}
func (z *Zone) GetIoHost(useHttps bool) string {
scheme := "http://"
if useHttps {
scheme = "https://"
}
return fmt.Sprintf("%s%s", scheme, z.IovipHost)
}
func (z *Zone) GetRsHost(useHttps bool) string {
scheme := "http://"
if useHttps {
scheme = "https://"
}
return fmt.Sprintf("%s%s", scheme, z.RsHost)
}
func (z *Zone) GetApiHost(useHttps bool) string {
scheme := "http://"
if useHttps {
scheme = "https://"
}
return fmt.Sprintf("%s%s", scheme, z.ApiHost)
}
// ZoneHuadong is the huadong (East China, z0) zone.
var ZoneHuadong = Zone{
	SrcUpHosts: []string{
		"up.qiniup.com",
		"up-nb.qiniup.com",
		"up-xs.qiniup.com",
	},
	CdnUpHosts: []string{
		"upload.qiniup.com",
		"upload-nb.qiniup.com",
		"upload-xs.qiniup.com",
	},
	RsHost: "rs.qbox.me",
	RsfHost: "rsf.qbox.me",
	ApiHost: "api.qiniu.com",
	IovipHost: "iovip.qbox.me",
}
// ZoneHuabei is the huabei (North China, z1) zone.
var ZoneHuabei = Zone{
	SrcUpHosts: []string{
		"up-z1.qiniup.com",
	},
	CdnUpHosts: []string{
		"upload-z1.qiniup.com",
	},
	RsHost: "rs-z1.qbox.me",
	RsfHost: "rsf-z1.qbox.me",
	ApiHost: "api-z1.qiniu.com",
	IovipHost: "iovip-z1.qbox.me",
}
// ZoneHuanan is the huanan (South China, z2) zone.
var ZoneHuanan = Zone{
	SrcUpHosts: []string{
		"up-z2.qiniup.com",
		"up-gz.qiniup.com",
		"up-fs.qiniup.com",
	},
	CdnUpHosts: []string{
		"upload-z2.qiniup.com",
		"upload-gz.qiniup.com",
		"upload-fs.qiniup.com",
	},
	RsHost: "rs-z2.qbox.me",
	RsfHost: "rsf-z2.qbox.me",
	ApiHost: "api-z2.qiniu.com",
	IovipHost: "iovip-z2.qbox.me",
}
// ZoneBeimei is the North America (na0) zone.
var ZoneBeimei = Zone{
	SrcUpHosts: []string{
		"up-na0.qiniup.com",
	},
	CdnUpHosts: []string{
		"upload-na0.qiniup.com",
	},
	RsHost: "rs-na0.qbox.me",
	RsfHost: "rsf-na0.qbox.me",
	ApiHost: "api-na0.qiniu.com",
	IovipHost: "iovip-na0.qbox.me",
}
// ZoneXinjiapo is the Singapore (as0) zone.
var ZoneXinjiapo = Zone{
	SrcUpHosts: []string{
		"up-as0.qiniup.com",
	},
	CdnUpHosts: []string{
		"upload-as0.qiniup.com",
	},
	RsHost: "rs-as0.qbox.me",
	RsfHost: "rsf-as0.qbox.me",
	ApiHost: "api-as0.qiniu.com",
	IovipHost: "iovip-as0.qbox.me",
}
// Aliases keyed by zone code, for programmers.
var Zone_z0 = ZoneHuadong
var Zone_z1 = ZoneHuabei
var Zone_z2 = ZoneHuanan
var Zone_na0 = ZoneBeimei
var Zone_as0 = ZoneXinjiapo
// UcHost is the API endpoint for querying bucket/zone metadata.
const UcHost = "https://uc.qbox.me"
// UcQueryRet is the reply of the uc /v2/query request.
type UcQueryRet struct {
	TTL int `json:"ttl"`
	Io map[string]map[string][]string `json:"io"`
	Up map[string]UcQueryUp `json:"up"`
}
// UcQueryUp holds the upload host lists inside a query reply.
type UcQueryUp struct {
	Main []string `json:"main,omitempty"`
	Backup []string `json:"backup,omitempty"`
	Info string `json:"info,omitempty"`
}
var (
	zoneMutext sync.RWMutex // guards zoneCache (sic: name typo is referenced as-is below)
	zoneCache = make(map[string]*Zone) // "ak:bucket" -> resolved zone
)
// GetZone resolves the zone (machine room) information for an ak/bucket
// pair, caching results process-wide for the lifetime of the process.
func GetZone(ak, bucket string) (zone *Zone, err error) {
	zoneID := fmt.Sprintf("%s:%s", ak, bucket)

	// fast path: cached entry
	zoneMutext.RLock()
	zone = zoneCache[zoneID]
	zoneMutext.RUnlock()
	if zone != nil {
		return
	}

	// slow path: ask the uc service
	reqURL := fmt.Sprintf("%s/v2/query?ak=%s&bucket=%s", UcHost, ak, bucket)
	var ret UcQueryRet
	qErr := DefaultClient.CallWithForm(context.TODO(), &ret, "GET", reqURL, nil, nil)
	if qErr != nil {
		err = fmt.Errorf("query zone error, %s", qErr.Error())
		return
	}

	// fix: guard the lookup chain — a malformed reply used to panic on the
	// unchecked [0] index
	ioMains := ret.Io["src"]["main"]
	if len(ioMains) == 0 {
		err = fmt.Errorf("query zone error, empty io host list")
		return
	}
	ioHost := ioMains[0]

	// appending a nil Backup slice is a no-op, so no nil checks are needed
	srcUpHosts := append(ret.Up["src"].Main, ret.Up["src"].Backup...)
	cdnUpHosts := append(ret.Up["acc"].Main, ret.Up["acc"].Backup...)

	zone = &Zone{
		SrcUpHosts: srcUpHosts,
		CdnUpHosts: cdnUpHosts,
		IovipHost: ioHost,
		RsHost: DefaultRsHost,
		RsfHost: DefaultRsfHost,
		ApiHost: DefaultAPIHost,
	}
	// override the default rs/rsf/api hosts when ioHost names a known zone
	setSpecificHosts(ioHost, zone)

	zoneMutext.Lock()
	zoneCache[zoneID] = zone
	zoneMutext.Unlock()
	return
}
// setSpecificHosts overwrites the zone's RS/RSF/API hosts with the
// region-specific ones when the io host carries a region marker
// ("-z1", "-z2", "-na0", "-as0"); otherwise the zone is left as-is.
func setSpecificHosts(ioHost string, zone *Zone) {
	for _, region := range []string{"z1", "z2", "na0", "as0"} {
		if strings.Contains(ioHost, "-"+region) {
			zone.RsHost = "rs-" + region + ".qbox.me"
			zone.RsfHost = "rsf-" + region + ".qbox.me"
			zone.ApiHost = "api-" + region + ".qiniu.com"
			return
		}
	}
}

4
vendor/github.com/qiniu/x/bytes.v7/README.md generated vendored Normal file
View File

@@ -0,0 +1,4 @@
qiniupkg.com/x/bytes.v7
=====
Extension module for Go bytes processing

177
vendor/github.com/qiniu/x/bytes.v7/bytes.go generated vendored Normal file
View File

@@ -0,0 +1,177 @@
package bytes
import (
"io"
"syscall"
)
// ---------------------------------------------------

// Reader is a read-only, seekable, closable stream over a byte slice.
// Unlike the standard library's bytes.Reader it also exposes the
// unread tail via Bytes and supports SeekToBegin.
type Reader struct {
	b   []byte
	off int
}

// NewReader returns a Reader positioned at the start of val.
func NewReader(val []byte) *Reader {
	return &Reader{b: val}
}

// Len reports the number of bytes not yet read.
func (r *Reader) Len() int {
	if rest := len(r.b) - r.off; rest > 0 {
		return rest
	}
	return 0
}

// Bytes returns the unread portion of the underlying slice (aliased,
// not copied).
func (r *Reader) Bytes() []byte {
	return r.b[r.off:]
}

// SeekToBegin rewinds the stream to offset 0.
func (r *Reader) SeekToBegin() (err error) {
	r.off = 0
	return nil
}

// Seek implements io.Seeker semantics. The target position is clamped
// to the end of the data; a negative target or an unknown whence value
// yields syscall.EINVAL.
func (r *Reader) Seek(offset int64, whence int) (ret int64, err error) {
	var base int64
	switch whence {
	case io.SeekStart:
		base = 0
	case io.SeekCurrent:
		base = int64(r.off)
	case io.SeekEnd:
		base = int64(len(r.b))
	default:
		return 0, syscall.EINVAL
	}
	pos := base + offset
	if pos < 0 {
		return 0, syscall.EINVAL
	}
	if limit := int64(len(r.b)); pos > limit {
		pos = limit
	}
	r.off = int(pos)
	return pos, nil
}

// Read copies bytes into val and advances the offset. Once the stream
// is exhausted it reports io.EOF (for a non-empty val).
func (r *Reader) Read(val []byte) (n int, err error) {
	n = copy(val, r.b[r.off:])
	r.off += n
	if n == 0 && len(val) != 0 {
		return 0, io.EOF
	}
	return n, nil
}

// Close is a no-op, present to satisfy io.Closer.
func (r *Reader) Close() (err error) {
	return nil
}
// ---------------------------------------------------

// Writer fills a caller-supplied byte slice from the front; once the
// slice is full, further writes are rejected with io.EOF.
type Writer struct {
	b []byte
	n int
}

// NewWriter returns a Writer that writes into buff.
func NewWriter(buff []byte) *Writer {
	return &Writer{b: buff}
}

// Write copies as much of val as still fits. A short write returns the
// copied count with a nil error; writing to an already-full buffer
// returns (0, io.EOF).
func (p *Writer) Write(val []byte) (n int, err error) {
	n = copy(p.b[p.n:], val)
	p.n += n
	if n == 0 && len(val) > 0 {
		err = io.EOF
	}
	return
}

// Len reports how many bytes have been written so far.
func (p *Writer) Len() int {
	return p.n
}

// Bytes returns the written prefix of the underlying slice.
func (p *Writer) Bytes() []byte {
	return p.b[:p.n]
}

// Reset forgets everything written, keeping the backing slice.
func (p *Writer) Reset() {
	p.n = 0
}
// ---------------------------------------------------

// Buffer is a growable in-memory "file" accessed by absolute offset
// via ReadAt/WriteAt rather than through a cursor.
type Buffer struct {
	b []byte
}

// NewBuffer returns an empty Buffer.
func NewBuffer() *Buffer {
	return &Buffer{}
}

// ReadAt copies bytes starting at off into buf. It reports io.EOF when
// off is at or past the end, or when fewer than len(buf) bytes remain.
func (p *Buffer) ReadAt(buf []byte, off int64) (n int, err error) {
	start := int(off)
	if start >= len(p.b) {
		return 0, io.EOF
	}
	n = copy(buf, p.b[start:])
	if n < len(buf) {
		err = io.EOF
	}
	return
}

// WriteAt writes buf at offset off, growing the buffer as needed; any
// gap between the old end and off is zero-filled.
func (p *Buffer) WriteAt(buf []byte, off int64) (n int, err error) {
	start := int(off)
	end := start + len(buf)
	switch {
	case end <= len(p.b):
		copy(p.b[start:], buf)
	case start == len(p.b):
		// Appending exactly at the end: no gap to zero-fill.
		p.b = append(p.b, buf...)
	default:
		// Extend (zero-filling any gap), then overwrite [start, end).
		p.b = append(p.b, make([]byte, end-len(p.b))...)
		copy(p.b[start:], buf)
	}
	return len(buf), nil
}

// WriteStringAt is WriteAt for a string payload, avoiding a []byte
// conversion by the caller.
func (p *Buffer) WriteStringAt(buf string, off int64) (n int, err error) {
	start := int(off)
	end := start + len(buf)
	switch {
	case end <= len(p.b):
		copy(p.b[start:], buf)
	case start == len(p.b):
		p.b = append(p.b, buf...)
	default:
		p.b = append(p.b, make([]byte, end-len(p.b))...)
		copy(p.b[start:], buf)
	}
	return len(buf), nil
}

// Truncate resizes the buffer to fsize bytes: shrinking discards the
// tail, growing zero-fills the new space.
func (p *Buffer) Truncate(fsize int64) (err error) {
	size := int(fsize)
	if size > len(p.b) {
		p.b = append(p.b, make([]byte, size-len(p.b))...)
	} else {
		p.b = p.b[:size]
	}
	return nil
}

// Buffer returns the whole underlying slice (aliased, not copied).
func (p *Buffer) Buffer() []byte {
	return p.b
}

// Len reports the current buffer size in bytes.
func (p *Buffer) Len() int {
	return len(p.b)
}

// ---------------------------------------------------

34
vendor/github.com/qiniu/x/bytes.v7/doc.go generated vendored Normal file
View File

@@ -0,0 +1,34 @@
/*
Package bytes (qiniupkg.com/x/bytes.v7) provides byte-slice related
extensions.

NewReader creates a read-only stream over a byte slice:

	var slice []byte
	...
	r := bytes.NewReader(slice)
	...
	r.Seek(0, 0) // r.SeekToBegin()
	...

Unlike the standard library's bytes.NewReader, this Reader supports
Seek.

NewWriter creates a write stream with a fixed capacity:

	slice := make([]byte, 1024)
	w := bytes.NewWriter(slice)
	...
	writtenData := w.Bytes()

If more than 1024 bytes are written to w, the excess data is discarded.

NewBuffer creates a randomly read/writable in-memory file supporting
ReadAt/WriteAt (instead of Read/Write):

	b := bytes.NewBuffer()
	b.Truncate(100)
	b.WriteAt([]byte("hello"), 100)
	slice := make([]byte, 105)
	n, err := b.ReadAt(slice, 0)
	...
*/
package bytes

54
vendor/github.com/qiniu/x/bytes.v7/replace.go generated vendored Normal file
View File

@@ -0,0 +1,54 @@
package bytes
import (
"bytes"
)
// ---------------------------------------------------

// ReplaceAt replaces the nsrc bytes of b starting at off with dest,
// sliding the tail as needed. The result may alias b, or be a
// reallocated copy when the slice has to grow.
func ReplaceAt(b []byte, off, nsrc int, dest []byte) []byte {
	grow := len(dest) - nsrc
	switch {
	case grow < 0:
		// Shrinking: drop dest in, then slide the tail left.
		tail := b[off+nsrc:]
		pos := off + copy(b[off:], dest)
		pos += copy(b[pos:], tail)
		return b[:pos]
	case grow > 0:
		// Growing: extend by grow bytes, slide the tail right (copy is
		// memmove-safe on overlap), then fall through to write dest.
		b = append(b, dest[:grow]...)
		copy(b[off+len(dest):], b[off+nsrc:])
	}
	copy(b[off:], dest)
	return b
}

// ReplaceOne replaces the first occurrence of src at or after from
// with dest. It returns the updated slice and the index just past the
// inserted dest, or (b, -1) when src does not occur.
func ReplaceOne(b []byte, from int, src, dest []byte) ([]byte, int) {
	idx := bytes.Index(b[from:], src)
	if idx < 0 {
		return b, -1
	}
	at := from + idx
	return ReplaceAt(b, at, len(src), dest), at + len(dest)
}

// Replace replaces up to n occurrences of src with dest; a negative n
// replaces all of them.
func Replace(b []byte, src, dest []byte, n int) []byte {
	for from := 0; n != 0; n-- {
		if b, from = ReplaceOne(b, from, src, dest); from < 0 {
			break
		}
	}
	return b
}

// ---------------------------------------------------

View File

@@ -0,0 +1,63 @@
// This package provide a method to read and replace http.Request's body.
package seekable
import (
"errors"
"io"
"io/ioutil"
"net/http"
"qiniupkg.com/x/bytes.v7"
)
// ---------------------------------------------------

// Seekabler is a readable byte stream that can also expose its
// remaining bytes and rewind to the beginning.
type Seekabler interface {
	Bytes() []byte
	Read(val []byte) (n int, err error)
	SeekToBegin() error
}

// SeekableCloser is a Seekabler that must also be closed.
type SeekableCloser interface {
	Seekabler
	io.Closer
}

// ---------------------------------------------------

// readCloser pairs an in-memory Seekabler with the original body's
// Closer, so replacing req.Body keeps its close semantics.
type readCloser struct {
	Seekabler
	io.Closer
}
// ErrNoBody is returned when the request carries no body to read.
var ErrNoBody = errors.New("no body")

// New returns a seekable view of req.Body. A body that is already a
// SeekableCloser is returned as-is; otherwise the body is read fully
// into memory and req.Body is replaced by a rewindable wrapper that
// still closes the original body.
func New(req *http.Request) (r SeekableCloser, err error) {
	if req.Body == nil {
		return nil, ErrNoBody
	}
	if sc, ok := req.Body.(SeekableCloser); ok {
		return sc, nil
	}
	data, rerr := ReadAll(req)
	if rerr != nil {
		return nil, rerr
	}
	r = bytes.NewReader(data)
	req.Body = readCloser{r, req.Body}
	return r, nil
}
// ReadAll reads the full request body. A positive ContentLength is
// trusted to size the read exactly; a zero ContentLength is treated as
// "no body" (ErrNoBody); an unknown (negative) length falls back to an
// unbounded read.
func ReadAll(req *http.Request) (b []byte, err error) {
	switch {
	case req.ContentLength > 0:
		b = make([]byte, int(req.ContentLength))
		_, err = io.ReadFull(req.Body, b)
		return b, err
	case req.ContentLength == 0:
		return nil, ErrNoBody
	default:
		return ioutil.ReadAll(req.Body)
	}
}

// ---------------------------------------------------

52
vendor/github.com/qiniu/x/reqid.v7/reqid.go generated vendored Normal file
View File

@@ -0,0 +1,52 @@
package reqid
import (
"encoding/binary"
"encoding/base64"
"net/http"
"time"
. "golang.org/x/net/context"
)
// --------------------------------------------------------------------

// pid is a per-process seed mixed into every request id: the process
// start time in nanoseconds, reduced modulo a prime below 2^32.
var pid = uint32(time.Now().UnixNano() % 4294967291)

// genReqId returns a 16-character URL-safe request id: the 4-byte pid
// followed by the current time in nanoseconds, base64-encoded.
func genReqId() string {
	var buf [12]byte
	binary.LittleEndian.PutUint32(buf[:4], pid)
	binary.LittleEndian.PutUint64(buf[4:], uint64(time.Now().UnixNano()))
	return base64.URLEncoding.EncodeToString(buf[:])
}
// --------------------------------------------------------------------

// key is an unexported Context-key type, so values stored by this
// package can never collide with keys from other packages.
type key int

const (
	reqidKey key = 0 // the sole key: stores the request id string
)

// NewContext returns a copy of ctx carrying the given request id.
func NewContext(ctx Context, reqid string) Context {
	return WithValue(ctx, reqidKey, reqid)
}
// NewContextWith returns a Context carrying the request id of req.
// The id is taken from the X-Reqid request header; when absent, a
// fresh id is generated and written back onto the request. Either way
// the id is mirrored onto w's X-Reqid response header.
func NewContextWith(ctx Context, w http.ResponseWriter, req *http.Request) Context {
	reqid := req.Header.Get("X-Reqid")
	if reqid == "" {
		reqid = genReqId()
		req.Header.Set("X-Reqid", reqid)
	}
	h := w.Header()
	h.Set("X-Reqid", reqid)
	return WithValue(ctx, reqidKey, reqid)
}
// FromContext extracts the request id stored by NewContext or
// NewContextWith; ok is false when ctx carries none.
func FromContext(ctx Context) (reqid string, ok bool) {
	reqid, ok = ctx.Value(reqidKey).(string)
	return
}

// --------------------------------------------------------------------

211
vendor/github.com/qiniu/x/xlog.v7/xlog.go generated vendored Normal file
View File

@@ -0,0 +1,211 @@
package xlog
import (
"fmt"
"io"
"os"
"runtime"
"qiniupkg.com/x/log.v7"
"qiniupkg.com/x/reqid.v7"
. "golang.org/x/net/context"
)
// Output-formatting flags re-exported from qiniupkg.com/x/log.v7, so
// callers can configure xlog without importing log directly.
const (
	Ldate         = log.Ldate
	Ltime         = log.Ltime
	Lmicroseconds = log.Lmicroseconds
	Llongfile     = log.Llongfile
	Lshortfile    = log.Lshortfile
	Lmodule       = log.Lmodule
	Llevel        = log.Llevel
	LstdFlags     = log.LstdFlags
	Ldefault      = log.Ldefault
)

// Severity levels re-exported from log.v7. Debug/Info output is gated
// by comparing these against log.Std.Level (see Debugf/Infof below).
const (
	Ldebug = log.Ldebug
	Linfo  = log.Linfo
	Lwarn  = log.Lwarn
	Lerror = log.Lerror
	Lpanic = log.Lpanic
	Lfatal = log.Lfatal
)
// ============================================================================
// type *Logger

// Logger is a leveled logger that tags every line with a request id,
// so all log lines belonging to one request can be correlated.
type Logger struct {
	ReqId string
}

// New returns a Logger bound to the given request id.
func New(reqId string) *Logger {
	return &Logger{reqId}
}
// NewWith builds a Logger from the request id stored in ctx (see
// package reqid). When ctx carries no request id, the Logger gets an
// empty id and a debug line is emitted.
func NewWith(ctx Context) *Logger {
	reqId, ok := reqid.FromContext(ctx)
	if !ok {
		// The message used to point at "xlog.New" and read "isn't
		// find"; name the actual function so the line is traceable.
		log.Debug("xlog.NewWith: reqid not found in context")
	}
	return &Logger{reqId}
}
// Spawn derives a child Logger whose request id is the parent id with
// "." and the child name appended, preserving the call chain in logs.
func (xlog *Logger) Spawn(child string) *Logger {
	childID := xlog.ReqId + "." + child
	return &Logger{ReqId: childID}
}
// ============================================================================

// Print calls Output to print to the standard Logger at info level.
// Arguments are handled in the manner of fmt.Print. The calldepth of 2
// attributes the log line to xlog's caller.
func (xlog *Logger) Print(v ...interface{}) {
	log.Std.Output(xlog.ReqId, log.Linfo, 2, fmt.Sprint(v...))
}

// Printf calls Output to print to the standard Logger at info level.
// Arguments are handled in the manner of fmt.Printf.
func (xlog *Logger) Printf(format string, v ...interface{}) {
	log.Std.Output(xlog.ReqId, log.Linfo, 2, fmt.Sprintf(format, v...))
}

// Println calls Output to print to the standard Logger at info level.
// Arguments are handled in the manner of fmt.Println.
func (xlog *Logger) Println(v ...interface{}) {
	log.Std.Output(xlog.ReqId, log.Linfo, 2, fmt.Sprintln(v...))
}
// -----------------------------------------

// Debugf logs a fmt.Sprintf-formatted line at debug level. It returns
// early — skipping the formatting work — when the global output level
// is above Ldebug.
func (xlog *Logger) Debugf(format string, v ...interface{}) {
	if log.Ldebug < log.Std.Level {
		return
	}
	log.Std.Output(xlog.ReqId, log.Ldebug, 2, fmt.Sprintf(format, v...))
}

// Debug logs its arguments fmt.Sprintln-style at debug level, subject
// to the same level gate as Debugf.
func (xlog *Logger) Debug(v ...interface{}) {
	if log.Ldebug < log.Std.Level {
		return
	}
	log.Std.Output(xlog.ReqId, log.Ldebug, 2, fmt.Sprintln(v...))
}
// -----------------------------------------

// Infof logs a fmt.Sprintf-formatted line at info level, skipped when
// the global output level is above Linfo.
func (xlog *Logger) Infof(format string, v ...interface{}) {
	if log.Linfo < log.Std.Level {
		return
	}
	log.Std.Output(xlog.ReqId, log.Linfo, 2, fmt.Sprintf(format, v...))
}

// Info logs its arguments fmt.Sprintln-style at info level, subject to
// the same level gate as Infof.
func (xlog *Logger) Info(v ...interface{}) {
	if log.Linfo < log.Std.Level {
		return
	}
	log.Std.Output(xlog.ReqId, log.Linfo, 2, fmt.Sprintln(v...))
}
// -----------------------------------------

// Warnf logs a fmt.Sprintf-formatted line at warn level. Unlike
// Debugf/Infof there is no level gate here.
func (xlog *Logger) Warnf(format string, v ...interface{}) {
	log.Std.Output(xlog.ReqId, log.Lwarn, 2, fmt.Sprintf(format, v...))
}

// Warn logs its arguments fmt.Sprintln-style at warn level.
func (xlog *Logger) Warn(v ...interface{}) {
	log.Std.Output(xlog.ReqId, log.Lwarn, 2, fmt.Sprintln(v...))
}
// -----------------------------------------

// Errorf logs a fmt.Sprintf-formatted line at error level (no level
// gate, mirroring Warnf).
func (xlog *Logger) Errorf(format string, v ...interface{}) {
	log.Std.Output(xlog.ReqId, log.Lerror, 2, fmt.Sprintf(format, v...))
}

// Error logs its arguments fmt.Sprintln-style at error level.
func (xlog *Logger) Error(v ...interface{}) {
	log.Std.Output(xlog.ReqId, log.Lerror, 2, fmt.Sprintln(v...))
}
// -----------------------------------------

// Fatal is equivalent to Print() followed by a call to os.Exit(1).
// Note that os.Exit terminates immediately: deferred functions do not
// run.
func (xlog *Logger) Fatal(v ...interface{}) {
	log.Std.Output(xlog.ReqId, log.Lfatal, 2, fmt.Sprint(v...))
	os.Exit(1)
}

// Fatalf is equivalent to Printf() followed by a call to os.Exit(1).
func (xlog *Logger) Fatalf(format string, v ...interface{}) {
	log.Std.Output(xlog.ReqId, log.Lfatal, 2, fmt.Sprintf(format, v...))
	os.Exit(1)
}

// Fatalln is equivalent to Println() followed by a call to os.Exit(1).
func (xlog *Logger) Fatalln(v ...interface{}) {
	log.Std.Output(xlog.ReqId, log.Lfatal, 2, fmt.Sprintln(v...))
	os.Exit(1)
}
// -----------------------------------------

// Panic is equivalent to Print() followed by a call to panic(); the
// panic value is the formatted message.
func (xlog *Logger) Panic(v ...interface{}) {
	s := fmt.Sprint(v...)
	log.Std.Output(xlog.ReqId, log.Lpanic, 2, s)
	panic(s)
}

// Panicf is equivalent to Printf() followed by a call to panic().
func (xlog *Logger) Panicf(format string, v ...interface{}) {
	s := fmt.Sprintf(format, v...)
	log.Std.Output(xlog.ReqId, log.Lpanic, 2, s)
	panic(s)
}

// Panicln is equivalent to Println() followed by a call to panic().
func (xlog *Logger) Panicln(v ...interface{}) {
	s := fmt.Sprintln(v...)
	log.Std.Output(xlog.ReqId, log.Lpanic, 2, s)
	panic(s)
}
// Stack logs v at error level followed by the stack traces of ALL
// goroutines (runtime.Stack with all=true), truncated to 1 MiB.
func (xlog *Logger) Stack(v ...interface{}) {
	s := fmt.Sprint(v...)
	s += "\n"
	buf := make([]byte, 1024*1024) // 1 MiB cap on the captured trace
	n := runtime.Stack(buf, true)
	s += string(buf[:n])
	s += "\n"
	log.Std.Output(xlog.ReqId, log.Lerror, 2, s)
}

// SingleStack logs v at error level followed by the current
// goroutine's stack only (runtime.Stack with all=false).
func (xlog *Logger) SingleStack(v ...interface{}) {
	s := fmt.Sprint(v...)
	s += "\n"
	buf := make([]byte, 1024*1024)
	n := runtime.Stack(buf, false)
	s += string(buf[:n])
	s += "\n"
	log.Std.Output(xlog.ReqId, log.Lerror, 2, s)
}
// ============================================================================

// SetOutput redirects the shared log.v7 standard logger's output to w.
func SetOutput(w io.Writer) {
	log.SetOutput(w)
}

// SetFlags sets the output formatting flags (Ldate, Ltime, ...) on the
// shared logger.
func SetFlags(flag int) {
	log.SetFlags(flag)
}

// SetOutputLevel sets the shared logger's level threshold; levels
// below it are dropped (see the gates in Debugf/Infof).
func SetOutputLevel(lvl int) {
	log.SetOutputLevel(lvl)
}

// ============================================================================