Add a new backend for uloz.to

Note that this temporarily skips uploads of files over 2.5 GB.

See https://github.com/rclone/rclone/pull/7552#issuecomment-1956316492
for details.
iotmaestro 2024-03-26 09:46:47 +00:00 committed by GitHub
parent dfc329c036
commit 4b5c10f72e
12 changed files with 1809 additions and 0 deletions
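As background for the 2.5 GB note above, here is a minimal, hypothetical sketch of how such an upload size guard can look; the constant value, helper name and error text are illustrative assumptions, not the exact code from this commit (`maxFileSizeBytes` is referenced by the test file later in this diff):

```go
package main

import (
	"errors"
	"fmt"
)

// maxFileSizeBytes approximates the temporary ~2.5 GB upload limit
// mentioned in the commit message (illustrative value only).
const maxFileSizeBytes = 2500 * 1024 * 1024

// checkUploadSize is a hypothetical helper: uploads larger than the limit
// are rejected up front instead of being attempted as a single HTTP request.
func checkUploadSize(size int64) error {
	if size > maxFileSizeBytes {
		return errors.New("file size over the supported max threshold")
	}
	return nil
}

func main() {
	fmt.Println(checkUploadSize(3 * 1024 * 1024 * 1024)) // over the limit -> error
	fmt.Println(checkUploadSize(1024))                   // small file -> <nil>
}
```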


@@ -94,6 +94,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
* Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
* Uloz.to [:page_facing_up:](https://rclone.org/ulozto/)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)


@@ -53,6 +53,7 @@ import (
_ "github.com/rclone/rclone/backend/storj"
_ "github.com/rclone/rclone/backend/sugarsync"
_ "github.com/rclone/rclone/backend/swift"
_ "github.com/rclone/rclone/backend/ulozto"
_ "github.com/rclone/rclone/backend/union"
_ "github.com/rclone/rclone/backend/uptobox"
_ "github.com/rclone/rclone/backend/webdav"

backend/ulozto/api/types.go (new file, 244 lines)

@@ -0,0 +1,244 @@
// Package api has type definitions for uloz.to
package api
import (
"errors"
"fmt"
"time"
)
// Error is a representation of the JSON structure returned by uloz.to for unsuccessful requests.
type Error struct {
ErrorCode int `json:"error"`
StatusCode int `json:"code"`
Message string `json:"message"`
}
// Error implements error.Error() and returns a string representation of the error.
func (e *Error) Error() string {
out := fmt.Sprintf("Error %d (%d)", e.ErrorCode, e.StatusCode)
if e.Message != "" {
out += ": " + e.Message
}
return out
}
// Is determines if the error is an instance of another error. It's required for the
// errors package to search the causal chain.
func (e *Error) Is(target error) bool {
var err *Error
ok := errors.As(target, &err)
return ok
}
// ListResponseMetadata groups fields common for all API List calls,
// and maps to the Metadata API JSON object.
type ListResponseMetadata struct {
Timestamp time.Time `json:"RunAt"`
Offset int32 `json:"offset"`
Limit int32 `json:"limit"`
ItemsCount int32 `json:"items_count"`
}
// Folder represents a single folder, and maps to the AggregatePrivateViewFolder
// JSON API object.
type Folder struct {
Discriminator string `json:"discriminator"`
Name string `json:"name"`
SanitizedName string `json:"name_sanitized"`
Slug string `json:"slug"`
Status string `json:"status"`
PublicURL string `json:"public_url"`
IsPasswordProtected bool `json:"is_password_protected"`
Type string `json:"type"`
FileManagerLink string `json:"file_manager_link"`
ParentFolderSlug string `json:"parent_folder_slug"`
Privacy string `json:"privacy"`
Created time.Time `json:"created"`
LastUserModified time.Time `json:"last_user_modified"`
HasSubfolder bool `json:"has_subfolder"`
HasTrashedSubfolders bool `json:"has_trashed_subfolders"`
}
// File represents a single file, and maps to the AggregatePrivateViewFileV3
// JSON API object.
type File struct {
Discriminator string `json:"discriminator"`
Slug string `json:"slug"`
URL string `json:"url"`
Realm string `json:"realm"`
Name string `json:"name"`
NameSanitized string `json:"name_sanitized"`
Extension string `json:"extension"`
Filesize int64 `json:"filesize"`
PasswordProtectedFile bool `json:"password_protected_file"`
Description string `json:"description"`
DescriptionSanitized string `json:"description_sanitized"`
IsPorn bool `json:"is_porn"`
Rating int `json:"rating"`
PasswordProtectedArchive bool `json:"password_protected_archive"`
MalwareStatus string `json:"malware_status"`
ContentStatus string `json:"content_status"`
ContentType string `json:"content_type"`
Format struct {
} `json:"format"`
DownloadTypes []interface{} `json:"download_types"`
ThumbnailInfo []interface{} `json:"thumbnail_info"`
PreviewInfo struct {
} `json:"preview_info"`
Privacy string `json:"privacy"`
IsPornByUploader bool `json:"is_porn_by_uploader"`
ExpireDownload int `json:"expire_download"`
ExpireTime time.Time `json:"expire_time"`
UploadTime time.Time `json:"upload_time"`
LastUserModified time.Time `json:"last_user_modified"`
FolderSlug string `json:"folder_slug"`
IsIncomplete bool `json:"is_incomplete"`
IsInTrash bool `json:"is_in_trash"`
Processing struct {
Identify bool `json:"identify"`
Thumbnails bool `json:"thumbnails"`
LivePreview bool `json:"live_preview"`
ArchiveContent bool `json:"archive_content"`
Preview bool `json:"preview"`
} `json:"processing"`
}
// CreateFolderRequest represents the JSON API object
// that's sent to the create folder API endpoint.
type CreateFolderRequest struct {
Name string `json:"name"`
ParentFolderSlug string `json:"parent_folder_slug"`
}
// ListFoldersResponse represents the JSON API object
// that's received from the list folders API endpoint.
type ListFoldersResponse struct {
Metadata ListResponseMetadata `json:"metadata"`
Folder Folder `json:"folder"`
Subfolders []Folder `json:"subfolders"`
}
// ListFilesResponse represents the JSON API object
// that's received from the list files API endpoint.
type ListFilesResponse struct {
Metadata ListResponseMetadata `json:"metadata"`
Items []File `json:"items"`
}
// DeleteFoldersRequest represents the JSON API object
// that's sent to the delete folders API endpoint.
type DeleteFoldersRequest struct {
Slugs []string `json:"slugs"`
}
// CreateUploadURLRequest represents the JSON API object that's
// sent to the API endpoint generating URLs for new file uploads.
type CreateUploadURLRequest struct {
UserLogin string `json:"user_login"`
Realm string `json:"realm"`
ExistingSessionSlug string `json:"private_slug"`
}
// CreateUploadURLResponse represents the JSON API object that's
// received from the API endpoint generating URLs for new file uploads.
type CreateUploadURLResponse struct {
UploadURL string `json:"upload_url"`
PrivateSlug string `json:"private_slug"`
ValidUntil time.Time `json:"valid_until"`
ValidityInterval int64 `json:"validity_interval"`
}
// BatchUpdateFilePropertiesRequest represents the JSON API object that's
// sent to the API endpoint moving the uploaded files from a scratch space
// to their final destination.
type BatchUpdateFilePropertiesRequest struct {
Name string `json:"name"`
FolderSlug string `json:"folder_slug"`
Description string `json:"description"`
Slugs []string `json:"slugs"`
UploadTokens map[string]string `json:"upload_tokens"`
}
// SendFilePayloadResponse represents the JSON API object that's received
// in response to uploading a file's body to the CDN URL.
type SendFilePayloadResponse struct {
Size int `json:"size"`
ContentType string `json:"contentType"`
Md5 string `json:"md5"`
Message string `json:"message"`
ReturnCode int `json:"return_code"`
Slug string `json:"slug"`
}
// CommitUploadBatchRequest represents the JSON API object that's
// sent to the API endpoint marking the upload batch as final.
type CommitUploadBatchRequest struct {
Status string `json:"status"`
OwnerLogin string `json:"owner_login"`
}
// CommitUploadBatchResponse represents the JSON API object that's
// received from the API endpoint marking the upload batch as final.
type CommitUploadBatchResponse struct {
PrivateSlug string `json:"private_slug"`
PublicSlug string `json:"public_slug"`
Status string `json:"status"`
ConfirmedAt time.Time `json:"confirmed_at"`
Discriminator string `json:"discriminator"`
Privacy string `json:"privacy"`
Name time.Time `json:"name"`
PublicURL string `json:"public_url"`
FilesCountOk int `json:"files_count_ok"`
FilesCountTrash int `json:"files_count_trash"`
FilesCountIncomplete int `json:"files_count_incomplete"`
}
// UpdateDescriptionRequest represents the JSON API object that's
// sent to the file modification API endpoint marking the upload batch as final.
type UpdateDescriptionRequest struct {
Description string `json:"description"`
}
// GetDownloadLinkRequest represents the JSON API object that's
// sent to the API endpoint that generates CDN download links for file payloads.
type GetDownloadLinkRequest struct {
Slug string `json:"file_slug"`
UserLogin string `json:"user_login"`
DeviceID string `json:"device_id"`
}
// GetDownloadLinkResponse represents the JSON API object that's
// received from the API endpoint that generates CDN download links for file payloads.
type GetDownloadLinkResponse struct {
Link string `json:"link"`
DownloadURLValidUntil time.Time `json:"download_url_valid_until"`
DownloadURLValidityInterval int `json:"download_url_validity_interval"`
Hash string `json:"hash"`
}
// AuthenticateRequest represents the JSON API object that's sent to the auth API endpoint.
type AuthenticateRequest struct {
Login string `json:"login"`
Password string `json:"password"`
}
// AuthenticateResponse represents the JSON API object that's received from the auth API endpoint.
type AuthenticateResponse struct {
TokenID string `json:"token_id"`
TokenValidityInterval int `json:"token_validity_interval"`
Session struct {
Country string `json:"country"`
IsLimitedCountry bool `json:"is_limited_country"`
User struct {
Login string `json:"login"`
UserID int64 `json:"user_id"`
Credit int64 `json:"credit"`
AvatarURL string `json:"avatar_url"`
FavoritesLink string `json:"favorites_link"`
RootFolderSlug string `json:"root_folder_slug"`
FavoritesFolderSlug string `json:"favorites_folder_slug"`
HasCloud bool `json:"has_cloud"`
} `json:"user"`
} `json:"session"`
}
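As an aside, the `Is` override above makes `errors.Is` treat any `*api.Error` in a wrapped chain as matching a blank `&api.Error{}` target. A minimal, hypothetical usage sketch (not part of this diff; the field values are made up):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/rclone/rclone/backend/ulozto/api"
)

// isAPIError reports whether err, or anything it wraps, is a uloz.to API
// error, relying on the Is override defined in the api package.
func isAPIError(err error) bool {
	return errors.Is(err, &api.Error{})
}

func main() {
	wrapped := fmt.Errorf("upload failed: %w", &api.Error{ErrorCode: 12345, StatusCode: 404, Message: "not found"})
	fmt.Println(isAPIError(wrapped))                    // true
	fmt.Println(isAPIError(errors.New("plain error"))) // false
}
```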

backend/ulozto/ulozto.go (new file, 1187 lines; diff suppressed because it is too large)


@@ -0,0 +1,117 @@
package ulozto
import (
"bytes"
"context"
"errors"
"testing"
"time"
"github.com/rclone/rclone/backend/ulozto/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/stretchr/testify/require"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestUlozto:",
NilObject: (*Object)(nil),
})
}
// TestListWithoutMetadata verifies that basic operations can be performed even if the remote file wasn't written by
// rclone, or the serialized metadata can't be read.
func TestListWithoutMetadata(t *testing.T) {
const (
remoteName = "TestUlozto:"
payload = "42foobar42"
sha256 = "d41f400003e93eb0891977f525e73ecedfa04272d2036f6137106168ecb196ab"
md5 = "8ad32cfeb3dc0f5092261268f335e0a5"
filesize = len(payload)
)
ctx := context.Background()
fstest.Initialise()
subRemoteName, subRemoteLeaf, err := fstest.RandomRemoteName(remoteName)
require.NoError(t, err)
f, err := fs.NewFs(ctx, subRemoteName)
if errors.Is(err, fs.ErrorNotFoundInConfigFile) {
t.Logf("Didn't find %q in config file - skipping tests", remoteName)
return
}
require.NoError(t, err)
file := fstest.Item{ModTime: time.UnixMilli(123456789), Path: subRemoteLeaf, Size: int64(filesize), Hashes: map[hash.Type]string{
hash.SHA256: sha256,
hash.MD5: md5,
}}
// Create a file with the given content and metadata
obj := fstests.PutTestContents(ctx, t, f, &file, payload, false)
// Verify the file has been uploaded
fstest.CheckListing(t, f, []fstest.Item{file})
// Now delete the description metadata
uloztoObj := obj.(*Object)
err = uloztoObj.updateFileProperties(ctx, api.UpdateDescriptionRequest{
Description: "",
})
require.NoError(t, err)
// Listing the file should still succeed, although with estimated mtime and no hashes
fileWithoutDetails := fstest.Item{Path: subRemoteLeaf, Size: int64(filesize), ModTime: uloztoObj.remoteFsMtime, Hashes: map[hash.Type]string{
hash.SHA256: "",
hash.MD5: "",
}}
fstest.CheckListing(t, f, []fstest.Item{fileWithoutDetails})
mtime := time.UnixMilli(987654321)
// When we update the mtime it should be reflected but hashes should stay intact
require.NoError(t, obj.SetModTime(ctx, mtime))
updatedMtimeFile := fstest.Item{Path: subRemoteLeaf, Size: int64(filesize), ModTime: mtime, Hashes: map[hash.Type]string{
hash.SHA256: "",
hash.MD5: "",
}}
fstest.CheckListing(t, f, []fstest.Item{updatedMtimeFile})
// Tear down
require.NoError(t, operations.Purge(ctx, f, ""))
}
// TestUploadLargeFile verifies that files over the supported threshold are not uploaded.
func TestUploadLargeFile(t *testing.T) {
const (
remoteName = "TestUlozto:"
payload = "foobar"
filesize = maxFileSizeBytes + 1
)
ctx := context.Background()
fstest.Initialise()
subRemoteName, subRemoteLeaf, err := fstest.RandomRemoteName(remoteName)
require.NoError(t, err)
f, err := fs.NewFs(ctx, subRemoteName)
if errors.Is(err, fs.ErrorNotFoundInConfigFile) {
t.Logf("Didn't find %q in config file - skipping tests", remoteName)
return
}
require.NoError(t, err)
file := fstest.Item{ModTime: time.UnixMilli(123456789), Path: subRemoteLeaf, Size: int64(filesize)}
obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
// The payload buffer is just a placeholder which shouldn't be used
_, err = f.Put(ctx, bytes.NewBufferString(payload), obji)
require.Error(t, err, "File size over the supported max threshold.")
// Verify the remote stayed intact
fstest.CheckListing(t, f, []fstest.Item{})
}
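Assuming a `TestUlozto:` remote is configured, the integration tests above can presumably be run the same way as for other rclone backends, e.g.:

    cd backend/ulozto && go test -v -remote TestUlozto: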


@@ -79,6 +79,7 @@ docs = [
"storj.md",
"sugarsync.md",
"tardigrade.md", # stub only to redirect to storj.md
"ulozto.md",
"uptobox.md",
"union.md",
"webdav.md",


@@ -180,6 +180,7 @@ WebDAV or S3, that work out of the box.)
{{< provider name="Synology" home="https://c2.synology.com/en-global/object-storage/overview" config="/s3/#synology-c2" >}}
{{< provider name="SugarSync" home="https://sugarsync.com/" config="/sugarsync/" >}}
{{< provider name="Tencent Cloud Object Storage (COS)" home="https://intl.cloud.tencent.com/product/cos" config="/s3/#tencent-cos" >}}
{{< provider name="Uloz.to" home="https://uloz.to" config="/ulozto/" >}}
{{< provider name="Uptobox" home="https://uptobox.com" config="/uptobox/" >}}
{{< provider name="Wasabi" home="https://wasabi.com/" config="/s3/#wasabi" >}}
{{< provider name="WebDAV" home="https://en.wikipedia.org/wiki/WebDAV" config="/webdav/" >}}


@@ -77,6 +77,7 @@ See the following for detailed instructions for
* [Storj](/storj/)
* [SugarSync](/sugarsync/)
* [Union](/union/)
* [Uloz.to](/ulozto/)
* [Uptobox](/uptobox/)
* [WebDAV](/webdav/)
* [Yandex Disk](/yandex/)


@@ -57,6 +57,7 @@ Here is an overview of the major features of each cloud storage system.
| SMB | - | R/W | Yes | No | - | - |
| SugarSync | - | - | No | No | - | - |
| Storj | - | R | No | No | - | - |
| Uloz.to | MD5, SHA256 ¹³ | - | No | Yes | - | - |
| Uptobox | - | - | No | Yes | - | - |
| WebDAV | MD5, SHA1 ³ | R ⁴ | Depends | No | - | - |
| Yandex Disk | MD5 | R/W | No | No | R | - |
@@ -100,6 +101,9 @@ hash](https://static.hidrive.com/dev/0001).
It combines SHA1 sums for each 4 KiB block hierarchically to a single
top-level sum.
¹³ Uloz.to provides a server-calculated MD5 hash upon file upload. MD5 and SHA256
hashes are client-calculated and stored as metadata fields.
### Hash ###
The cloud storage system supports various hash types of the objects.
@@ -523,6 +527,7 @@ upon backend-specific capabilities.
| SMB | No | No | Yes | Yes | No | No | Yes | Yes | No | No | Yes |
| SugarSync | Yes | Yes | Yes | Yes | No | No | Yes | No | Yes | No | Yes |
| Storj | Yes ² | Yes | Yes | No | No | Yes | Yes | No | Yes | No | No |
| Uloz.to | No | No | No | No | No | No | No | No | No | No | Yes |
| Uptobox | No | Yes | Yes | Yes | No | No | No | No | No | No | No |
| WebDAV | Yes | Yes | Yes | Yes | No | No | Yes ³ | No | No | Yes | Yes |
| Yandex Disk | Yes | Yes | Yes | Yes | Yes | No | Yes | No | Yes | Yes | Yes |

docs/content/ulozto.md (new file, 247 lines)

@@ -0,0 +1,247 @@
---
title: "Uloz.to"
description: "Rclone docs for Uloz.to"
versionIntroduced: "v1.66"
---
# {{< icon "fa fa-box-archive" >}} Uloz.to
Paths are specified as `remote:path`
Paths may be as deep as required, e.g. `remote:directory/subdirectory`.
The initial setup for Uloz.to involves filling in the user credentials.
`rclone config` walks you through it.
## Configuration
Here is an example of how to make a remote called `remote`. First run:
rclone config
This will guide you through an interactive setup process:
```
No remotes found, make a new one?
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n
name> remote
Type of storage to configure.
Choose a number from below, or type in your own value
[snip]
XX / Uloz.to
\ "ulozto"
[snip]
Storage> ulozto
Option app_token.
The application token identifying the app. An app API key can be either found in the API doc
https://uloz.to/upload-resumable-api-beta or obtained from customer service.
Enter a value. Press Enter to leave empty.
app_token> token_value
Option username.
The username of the principal to operate as.
Enter a value. Press Enter to leave empty.
username> user
Option password.
The password for the user.
Enter a value. Press Enter to leave empty.
password> secretPassword
Edit advanced config?
y) Yes
n) No (default)
y/n> n
Keep this "remote" remote?
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y
```
Once configured you can then use `rclone` like this,
List folders in the root level folder:
rclone lsd remote:
List all the files in your root folder:
rclone ls remote:
To copy a local folder to a Uloz.to folder called backup:
rclone copy /home/source remote:backup
### User credentials
The only reliable method is to authenticate the user using
username and password. Uloz.to offers an API key as well, but
it's reserved for the use of Uloz.to's in-house application
and using it in different circumstances is unreliable.
### Modification times and hashes
Uloz.to doesn't allow the user to set a custom modification time,
or retrieve the hashes after upload. As a result, the integration
uses a free form field the API provides to encode client-provided
timestamps and hashes. Timestamps are stored with microsecond
precision.
A server-calculated MD5 hash of the file is verified upon upload.
Afterwards, the backend only serves the client-side calculated
hashes.
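As a purely hypothetical illustration of what such client-provided metadata could look like once serialized into that free form field (the real field names and layout used by the backend may differ):

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// fileMetadata is an illustrative stand-in for the client-calculated values
// the backend stores alongside a file; it is not the backend's actual format.
type fileMetadata struct {
	ModTimeMicros int64  `json:"mtime_us"` // microsecond precision, per the note above
	MD5           string `json:"md5"`
	SHA256        string `json:"sha256"`
}

func main() {
	meta := fileMetadata{
		ModTimeMicros: time.Date(2024, 3, 26, 9, 46, 47, 0, time.UTC).UnixMicro(),
		MD5:           "8ad32cfeb3dc0f5092261268f335e0a5",
		SHA256:        "d41f400003e93eb0891977f525e73ecedfa04272d2036f6137106168ecb196ab",
	}
	out, _ := json.Marshal(meta)
	fmt.Println(string(out)) // value that could be stored in the description field
}
```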
### Restricted filename characters
In addition to the [default restricted characters set](/overview/#restricted-characters)
the following characters are also replaced:
| Character | Value | Replacement |
| --------- |:-----:|:-----------:|
| \ | 0x5C | ＼ |
Invalid UTF-8 bytes will also be [replaced](/overview/#invalid-utf8),
as they can't be used in JSON strings.
### Transfers
All files are currently uploaded using a single HTTP request, so
uploading large files requires a stable connection. Rclone will
upload up to `--transfers` files at the same time (shared among all
uploads).
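For example, to run at most four parallel uploads (a plain usage illustration):

    rclone copy --transfers 4 /home/source remote:backup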
### Deleting files
By default, files are moved to the recycle bin whereas folders
are deleted immediately. Trashed files are permanently deleted after
30 days in the recycle bin.
Emptying the trash is currently not implemented in rclone.
### Root folder ID
You can set the `root_folder_slug` for rclone. This is the folder
(identified by its `Folder slug`) that rclone considers to be the root
of your Uloz.to drive.
Normally you will leave this blank and rclone will determine the
correct root to use itself. However you can set this to restrict rclone
to a specific folder hierarchy.
In order to do this you will have to find the `Folder slug` of the
folder you wish to use as root. This will be the last segment
of the URL when you open the relevant folder in the Uloz.to web
interface.
For example, for exploring a folder with URL
`https://uloz.to/fm/my-files/foobar`, `foobar` should be used as the
root slug.
`root_folder_slug` can be used alongside a specific path in the remote
path. For example, if your remote's `root_folder_slug` corresponds to `/foo/bar`,
`remote:baz/qux` will refer to `ABSOLUTE_ULOZTO_ROOT/foo/bar/baz/qux`.
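For instance, a remote pinned to the `foobar` folder from the example above might end up with a config section like this (a sketch; the slug and credentials are placeholders):

```
[remote]
type = ulozto
app_token = XXXXXXXXXXXXXXXXXXXXXXXXX
username = user
password = *** ENCRYPTED ***
root_folder_slug = foobar
```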
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/ulozto/ulozto.go then run make backenddocs" >}}
### Standard options
Here are the Standard options specific to ulozto (Uloz.to).
#### --ulozto-app-token
The application token identifying the app. An app API key can be either found in the API doc https://uloz.to/upload-resumable-api-beta or obtained from customer service.
Properties:
- Config: app_token
- Env Var: RCLONE_ULOZTO_APP_TOKEN
- Type: string
- Required: false
#### --ulozto-username
The username of the principal to operate as.
Properties:
- Config: username
- Env Var: RCLONE_ULOZTO_USERNAME
- Type: string
- Required: false
#### --ulozto-password
The password for the user.
Properties:
- Config: password
- Env Var: RCLONE_ULOZTO_PASSWORD
- Type: string
- Required: false
### Advanced options
Here are the Advanced options specific to ulozto (Uloz.to).
#### --ulozto-root-folder-slug
If set, rclone will use this folder as the root folder for all operations. For example, if the slug identifies 'foo/bar/', 'ulozto:baz' is equivalent to 'ulozto:foo/bar/baz' without any root slug set.
Properties:
- Config: root_folder_slug
- Env Var: RCLONE_ULOZTO_ROOT_FOLDER_SLUG
- Type: string
- Required: false
#### --ulozto-list-page-size
The size of a single page for list commands. 1-500
Properties:
- Config: list_page_size
- Env Var: RCLONE_ULOZTO_LIST_PAGE_SIZE
- Type: int
- Default: 500
#### --ulozto-encoding
The encoding for the backend.
See the [encoding section in the overview](/overview/#encoding) for more info.
Properties:
- Config: encoding
- Env Var: RCLONE_ULOZTO_ENCODING
- Type: Encoding
- Default: Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot
{{< rem autogenerated options stop >}}
## Limitations
Uloz.to file names can't have the `\` character in. rclone maps this to
and from an identical looking unicode equivalent `＼` (U+FF3C Fullwidth
Reverse Solidus).
Uloz.to only supports filenames up to 255 characters in length.
Uloz.to rate limits access to the API, but exact details are undisclosed.
Practical testing reveals that hitting the rate limit during normal use
is very rare, although not impossible with a higher number of concurrently
uploaded files.
`rclone about` is not supported by the Uloz.to backend. Although
there's an endpoint to retrieve the information for the UI, it's not
exposed in the API. Backends without this capability cannot determine
free space for an rclone mount or use policy `mfs` (most free space)
as a member of an rclone union remote.
See [List of backends that do not support rclone about](https://rclone.org/overview/#optional-features) and [rclone about](https://rclone.org/commands/rclone_about/)


@@ -100,6 +100,7 @@
<a class="dropdown-item" href="/smb/"><i class="fa fa-server fa-fw"></i> SMB / CIFS</a>
<a class="dropdown-item" href="/storj/"><i class="fas fa-dove fa-fw"></i> Storj</a>
<a class="dropdown-item" href="/sugarsync/"><i class="fas fa-dove fa-fw"></i> SugarSync</a>
<a class="dropdown-item" href="/uloz.to/"><i class="fa fa-box-archive fa-fw"></i> Uloz.to</a>
<a class="dropdown-item" href="/uptobox/"><i class="fa fa-archive fa-fw"></i> Uptobox</a>
<a class="dropdown-item" href="/union/"><i class="fa fa-link fa-fw"></i> Union (merge backends)</a>
<a class="dropdown-item" href="/webdav/"><i class="fa fa-server fa-fw"></i> WebDAV</a>


@@ -458,3 +458,6 @@ backends:
- backend: "quatrix"
remote: "TestQuatrix:"
fastlist: false
- backend: "ulozto"
remote: "TestUlozto:"
fastlist: false