Migrating Command Line Media Workflows and Scripted FTP to M-Stream Accelerated File Transfer

Many of the prospects and customers who look at the File Fabric for Media are either using scripted FTP or have existing IP invested in their command line media workflows (scripted FTP generally refers to FTP that has custom scripts written around it to enable automation).

The great thing about the M-Stream file transfer acceleration feature of the Enterprise File Fabric is that not only can it be used transparently through the browser and the desktop, but, because it has a very rich HTTPS API, it can also easily be integrated into existing command line workflows.

Below is a Go script that does exactly that, providing an accelerated media transporter for moving files held on storage such as Amazon S3, Azure, Google Storage, local file storage (CIFS, NFS, SMB, SAN, NAS etc.), OpenStack and other S3 compatible clouds.

package main

import (
    "encoding/base64"
    "encoding/xml"
    "fmt"
    "io"
    "io/ioutil"
    "log"
    "net/http"
    "net/url"
    "os"
    "path/filepath"
    "strconv"
    "strings"
    "sync"
    "syscall"
    "time"

    "github.com/sethgrid/multibar"
    "github.com/urfave/cli"
    "golang.org/x/crypto/ssh/terminal"
)

// Worker holds the state for a multi-stream download: the download URL,
// the output file, the number of parallel streams and the total file size.
type Worker struct {
    Url       string
    File      *os.File
    Count     int
    SyncWG    sync.WaitGroup
    TotalSize int
    Progress
}

// Progress wraps the multibar progress bars and their per-part update functions.
type Progress struct {
    Bars   *multibar.BarContainer
    Update map[int]multibar.ProgressFunc
}

func main() {
    app := cli.NewApp()

    app.Name = "File Fabric Transporter"
    app.Usage = "Tool to transport files between local storage and the File Fabric"
    app.Flags = []cli.Flag{
        cli.StringFlag{
            Name:  "username, u",
            Value: "",
            Usage: "REQUIRED File Fabric Username",
        },
        cli.StringFlag{
            Name:  "password, p",
            Value: "",
            Usage: "REQUIRED File Fabric Password",
        },
        cli.StringFlag{
            Name:  "endpoint, e",
            Value: "",
            Usage: "REQUIRED File Fabric Endpoint",
        },
        cli.StringFlag{
            Name:  "output, o",
            Value: "",
            Usage: "Output File",
        },
        cli.StringFlag{
            Name:  "id, i",
            Value: "",
            Usage: "File ID",
        },
        cli.IntFlag{
            Name:  "streams, s",
            Value: 5,
            Usage: "[OPTIONAL] Streams",
        },
    }

    app.Commands = []cli.Command{
        {
            Name:    "download",
            Aliases: []string{"d"},
            Usage:   "Downloads a file from the File Fabric",
            Action:  downloadFile,
            Flags:   app.Flags,
        },
    }

    err := app.Run(os.Args)
    if err != nil {
        log.Fatal(err)
    }
    // e.g. http://ipv4.download.thinkbroadband.com/20MB.zip
    // e.g. http://ipv4.download.thinkbroadband.com/50MB.zip
}

// downloadFile authenticates against the File Fabric, looks up the file size
// and then downloads the file in parallel byte-range streams.
func downloadFile(c *cli.Context) error {

    fmt.Println("--- Starting ---")

    sme_endpoint := c.String("endpoint")
    username := c.String("username")
    fi_id := c.String("id")
    output := c.String("output")
    streams := c.Int("streams")
    password := c.String("password")

    if len(password) == 0 {
        password = promptPass()
    }

    fmt.Println("Endpoint: " + sme_endpoint)
    fmt.Println("Username: " + username)
    fmt.Println("File ID: " + fi_id)
    fmt.Println("Output File: " + output)
    fmt.Println("Parallel Streams: " + strconv.Itoa(streams))

    fmt.Println("[Step 1] Obtaining token")

    resp, err := http.Get(sme_endpoint + "/api/*/getToken/" + b64Encode(username) + "," + b64Encode(password))
    if err != nil {
        log.Fatal("Token request failed, error:", err)
    }
    defer resp.Body.Close()

    body, _ := ioutil.ReadAll(resp.Body)

    var token_response TokenResponse
    xml.Unmarshal(body, &token_response)

    fmt.Println("[Step 1] API Token: " + token_response.Token)

    fmt.Println("[Step 2] Get File Info")
    info_resp, err := http.Get(sme_endpoint + "/api/" + token_response.Token + "/getFileInfo/" + b64Encode(fi_id))
    if err != nil {
        log.Fatal("File info request failed, error:", err)
    }
    info_body, _ := ioutil.ReadAll(info_resp.Body)

    var file_info_response FileInfoResponse
    xml.Unmarshal(info_body, &file_info_response)

    fmt.Println("[Step 2] File Size: " + strconv.FormatInt(file_info_response.FileSize, 10))
    fmt.Println("[Step 3] Downloading file")

    download_url := sme_endpoint + "/api/" + token_response.Token + "/getFile/" + b64Encode(fi_id)
    worker_count := streams // number of goroutines / parallel streams

    total_size := int(file_info_response.FileSize)
    fmt.Printf("Url: %s\n", download_url)
    fmt.Printf("File size: %d bytes\n", total_size)

    f, err := os.OpenFile(output, os.O_APPEND|os.O_CREATE|os.O_RDWR, 0666)
    if err != nil {
        log.Fatal("Failed to create file, error:", err)
    }
    defer f.Close()

    // New worker struct for downloading the file
    var worker = Worker{
        Url:       download_url,
        File:      f,
        Count:     worker_count,
        TotalSize: total_size,
    }

    // One progress bar per stream
    worker.Progress.Bars, _ = multibar.New()
    worker.Progress.Update = make(map[int]multibar.ProgressFunc)

    var start, end int
    partial_size := total_size / worker_count

    startTime := time.Now()
    for num := 1; num <= worker.Count; num++ {
        worker.Progress.Update[num] = worker.Progress.Bars.MakeBar(100, fmt.Sprintf("Part %d", num))
        worker.Progress.Bars.Bars[num-1].Head = '>'

        if num == worker.Count {
            end = total_size // last part picks up any remainder
        } else {
            end = start + partial_size
        }

        worker.SyncWG.Add(1)
        go worker.writeRange(num, start, end-1)
        start = end
    }
    go worker.Progress.Bars.Listen()
    worker.SyncWG.Wait()
    elapsed := time.Since(startTime)
    time.Sleep(300 * time.Millisecond) // Wait for the progress bar UI to finish drawing.
    fmt.Println("Done!")

    fmt.Println(elapsed)
    return nil
}

// writeRange downloads one byte range of the file and writes it into the
// output file at the correct offset, updating the progress bar as it goes.
func (w *Worker) writeRange(part_num int, start int, end int) {
    defer w.SyncWG.Done()
    var written int
    body, size, err := w.getRangeBody(part_num, start, end)
    if err != nil {
        log.Fatalf("Part %d request error: %s\n", part_num, err.Error())
    }
    defer body.Close()

    percent_flag := map[int]bool{}
    buf := make([]byte, 32*1024) // buffer for the chunks that are read
    for {
        nr, er := body.Read(buf)
        if nr > 0 {
            nw, err := w.File.WriteAt(buf[0:nr], int64(start))
            if err != nil {
                log.Fatalf("Part %d write error: %s.\n", part_num, err.Error())
            }
            if nr != nw {
                log.Fatalf("Part %d error: short write.\n", part_num)
            }

            start = nw + start
            if nw > 0 {
                written += nw
            }

            // Report progress, at most once per whole percent.
            p := int(float32(written) / float32(size) * 100)
            if _, flagged := percent_flag[p]; !flagged {
                percent_flag[p] = true
                w.Progress.Update[part_num](p)
            }
        }
        if er != nil {
            if er == io.EOF {
                if size != written {
                    log.Fatalf("Part %d unfinished.\n", part_num)
                }
                // Part downloaded successfully
                break
            }
            log.Fatalf("Part %d read error: %s\n", part_num, er.Error())
        }
    }
}

// getRangeBody issues a ranged GET request for one part of the file and
// returns the response body together with its Content-Length.
func (w *Worker) getRangeBody(part_num int, start int, end int) (io.ReadCloser, int, error) {
    var client http.Client
    req, err := http.NewRequest("GET", w.Url, nil)
    if err != nil {
        return nil, 0, err
    }

    // Set the Range header so the server only returns this part of the file.
    req.Header.Add("Range", "bytes="+strconv.Itoa(start)+"-"+strconv.Itoa(end))
    resp, err := client.Do(req)
    if err != nil {
        return nil, 0, err
    }
    size, _ := strconv.Atoi(resp.Header.Get("Content-Length"))
    return resp.Body, size, err
}

// getSizeAndCheckRangeSupport is currently unused; the File Fabric getFileInfo
// call is used to obtain the file size instead.
func getSizeAndCheckRangeSupport(url string) (size int) {
    // res, _ := http.Head(url)
    // header := res.Header
    // accept_ranges, supported := header["Accept-Ranges"]
    // if !supported {
    //     log.Fatal("Doesn't support `Accept-Ranges`.")
    // } else if supported && accept_ranges[0] != "bytes" {
    //     log.Fatal("Support `Accept-Ranges`, but value is not `bytes`.")
    // }
    // size, _ = strconv.Atoi(header["Content-Length"][0]) // Get the content length.
    return
}

// getFileName extracts the file name from a download URL.
func getFileName(download_url string) string {
    url_struct, _ := url.Parse(download_url)
    return filepath.Base(url_struct.Path)
}

// b64Encode base64-encodes a string, as expected by the File Fabric API.
func b64Encode(input string) string {
    return base64.StdEncoding.EncodeToString([]byte(input))
}

// promptPass prompts for the password on the terminal without echoing it.
func promptPass() string {
    fmt.Print("Enter Password: ")
    bytePassword, _ := terminal.ReadPassword(int(syscall.Stdin))

    pass := string(bytePassword)

    return strings.TrimSpace(pass)
}

// TokenResponse maps the XML returned by the getToken API call.
type TokenResponse struct {
    Token string `xml:"token"`
}

// FileInfoResponse maps the XML returned by the getFileInfo API call.
type FileInfoResponse struct {
    FileSize int64 `xml:"file>fi_size"`
}
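
Once compiled, the transporter can be driven straight from existing shell scripts in place of an FTP client. The invocation below is a minimal sketch: the source file and binary names are arbitrary, the endpoint, username, file ID and output name are placeholder values, and the flags correspond to those defined in the script above.

go build -o fftransporter transporter.go

./fftransporter download -e https://filefabric.example.com -u mediauser -i <file-id> -o ./clip.mxf -s 10

If the -p flag is omitted the tool prompts for the password on the terminal, so credentials do not need to be embedded in scripts.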

FTP is slow, and when the distance between the transfer endpoints is substantial it is even slower. The File Fabric for Media with the M-Stream File Acceleration feature is a good choice for media companies that need to share large media files in a non-proprietary fashion and wish to retain the option of keeping their script-based workflows.

This post is part of our blog series for IBC2019. If you're meeting us, we're at Stand B.38F, Hall 8.
