Initial commit

commit a589014106
Date: 2021-12-19 17:30:51 +01:00
65 changed files with 7437 additions and 0 deletions

server/ssh/config.go (new file, 68 lines)

@@ -0,0 +1,68 @@
package ssh

import (
	c "docker4ssh/config"
	"docker4ssh/database"
	"fmt"
	"golang.org/x/crypto/ssh"
	"io/ioutil"
)

// NewSSHConfig builds the ssh.ServerConfig for the server. Authentication
// succeeds if the user/password pair maps to a saved container, a static
// profile or, if enabled, the dynamic profile; the match is handed on to the
// connection handler via the Permissions.CriticalOptions map.
func NewSSHConfig(config *c.Config) (*ssh.ServerConfig, error) {
	db := database.GetDatabase()

	sshConfig := &ssh.ServerConfig{
		MaxAuthTries: 3,
		PasswordCallback: func(conn ssh.ConnMetadata, password []byte) (*ssh.Permissions, error) {
			if containerID, exists := db.GetContainerByAuth(database.NewUnsafeAuth(conn.User(), password)); exists && containerID != "" {
				return &ssh.Permissions{
					CriticalOptions: map[string]string{
						"containerID": containerID,
					},
				}, nil
			} else if profile, ok := profiles.Match(conn.User(), password); ok {
				return &ssh.Permissions{
					CriticalOptions: map[string]string{
						"profile": profile.Name(),
					},
				}, nil
			} else if config.Profile.Dynamic.Enable && dynamicProfile.Match(conn.User(), password) {
				return &ssh.Permissions{
					CriticalOptions: map[string]string{
						"profile": "dynamic",
						"image":   conn.User(),
					},
				}, nil
			}
			// don't include the entered password in the error: if the user
			// only made a typo, their nearly correct password would otherwise
			// end up in the logs
			return nil, fmt.Errorf("%s tried to connect with user %s but entered a wrong password", conn.RemoteAddr().String(), conn.User())
		},
	}
	sshConfig.SetDefaults()

	key, err := parseSSHPrivateKey(config.SSH.Keyfile, []byte(config.SSH.Passphrase))
	if err != nil {
		return nil, err
	}
	sshConfig.AddHostKey(key)

	return sshConfig, nil
}

// parseSSHPrivateKey reads the host key from path and parses it, using
// password as the passphrase if one is given.
func parseSSHPrivateKey(path string, password []byte) (ssh.Signer, error) {
	keyBytes, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}

	var key ssh.Signer
	if len(password) == 0 {
		key, err = ssh.ParsePrivateKey(keyBytes)
	} else {
		key, err = ssh.ParsePrivateKeyWithPassphrase(keyBytes, password)
	}
	if err != nil {
		return nil, err
	}

	return key, nil
}
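
The CriticalOptions set in the password callback above travel with the handshake and are read back from serverConn.Permissions in ssh.go. A self-contained sketch of that round-trip pattern with golang.org/x/crypto/ssh (the throwaway host key, credentials and port are made up for illustration):

	package main

	import (
		"crypto/ed25519"
		"crypto/rand"
		"fmt"
		"log"
		"net"

		"golang.org/x/crypto/ssh"
	)

	func main() {
		// throwaway host key, for illustration only
		_, priv, _ := ed25519.GenerateKey(rand.Reader)
		signer, _ := ssh.NewSignerFromKey(priv)

		conf := &ssh.ServerConfig{
			PasswordCallback: func(conn ssh.ConnMetadata, password []byte) (*ssh.Permissions, error) {
				if conn.User() == "alpine" && string(password) == "secret" {
					// values stored here are available again after the handshake
					return &ssh.Permissions{
						CriticalOptions: map[string]string{"image": conn.User()},
					}, nil
				}
				return nil, fmt.Errorf("access denied for %s", conn.User())
			},
		}
		conf.AddHostKey(signer)

		listener, err := net.Listen("tcp", ":2222")
		if err != nil {
			log.Fatal(err)
		}
		conn, _ := listener.Accept()
		serverConn, chans, reqs, err := ssh.NewServerConn(conn, conf)
		if err != nil {
			log.Fatal(err)
		}
		go ssh.DiscardRequests(reqs)
		go func() {
			for ch := range chans {
				ch.Reject(ssh.Prohibited, "demo only")
			}
		}()
		// the callback's decision is read back from the connection
		log.Printf("image for this session: %s", serverConn.Permissions.CriticalOptions["image"])
	}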

server/ssh/connection.go (new file, 201 lines)

@@ -0,0 +1,201 @@
package ssh

import (
	"bytes"
	"context"
	"database/sql"
	"docker4ssh/database"
	"docker4ssh/docker"
	"docker4ssh/utils"
	"fmt"
	"go.uber.org/zap"
	"strconv"
	"sync"
	"time"
)

var (
	allContainers []*docker.InteractiveContainer
)

// closeAllContainers stops every container in the global container scope
// concurrently and returns once all of them have stopped.
func closeAllContainers(ctx context.Context) {
	var wg sync.WaitGroup
	for _, container := range allContainers {
		wg.Add(1)
		// re-declare the loop variable so every goroutine captures its own copy
		container := container
		go func() {
			container.Stop(ctx)
			wg.Done()
		}()
	}
	wg.Wait()
}

// connection serves a user's terminal session: it resolves (or creates) the
// container, starts it if necessary and attaches the user's terminal to it.
// It blocks until the session has finished.
func connection(client *docker.Client, user *User) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	container, ok := getContainer(ctx, client, user)
	if !ok {
		zap.S().Errorf("Failed to create container for %s", user.ID)
		return
	}
	user.Container = container.SimpleContainer

	var found bool
	for _, cont := range allContainers {
		if cont == container {
			found = true
			break
		}
	}
	if !found {
		allContainers = append(allContainers, container)
	}

	// check if the container is running and start it if not
	if running, err := container.Running(ctx); err == nil && !running {
		if err = container.Start(ctx); err != nil {
			zap.S().Errorf("Failed to start container %s: %v", container.ContainerID, err)
			fmt.Fprintln(user.Terminal, "Failed to start container")
			return
		}
		zap.S().Infof("Started container %s with internal id '%s', ip '%s'", container.ContainerID, container.FullContainerID, container.Network.IP)
	} else if err != nil {
		zap.S().Errorf("Failed to get container running state: %v", err)
		fmt.Fprintln(user.Terminal, "Failed to check container running state")
	}

	config := container.Config()
	if user.Profile.StartupInformation {
		buf := &bytes.Buffer{}
		fmt.Fprintf(buf, "┌───Container────────────────┐\r\n")
		fmt.Fprintf(buf, "│ Container ID: %-12s │\r\n", container.ContainerID)
		fmt.Fprintf(buf, "│ Network Mode: %-12s │\r\n", config.NetworkMode.Name())
		fmt.Fprintf(buf, "│ Configurable: %-12t │\r\n", config.Configurable)
		fmt.Fprintf(buf, "│ Run Level: %-12s │\r\n", config.RunLevel.Name())
		fmt.Fprintf(buf, "│ Exit After: %-12s │\r\n", config.ExitAfter)
		fmt.Fprintf(buf, "│ Keep On Exit: %-12t │\r\n", config.KeepOnExit)
		fmt.Fprintf(buf, "└──────────────Information───┘\r\n")
		user.Terminal.Write(buf.Bytes())
	}

	// start a new terminal session
	if err := container.Terminal(ctx, user.Terminal); err != nil {
		zap.S().Errorf("Failed to serve %s terminal: %v", container.ContainerID, err)
		fmt.Fprintln(user.Terminal, "Failed to serve terminal")
	}

	if config.RunLevel == docker.User && container.TerminalCount() == 0 {
		if err := container.Stop(ctx); err != nil {
			zap.S().Errorf("Error occurred while stopping container %s: %v", container.ContainerID, err)
		} else {
			// remove the container from the global scope by swapping it with
			// the last element and truncating the slice
			lenBefore := len(allContainers)
			for i, cont := range allContainers {
				if cont == container {
					allContainers[i] = allContainers[lenBefore-1]
					allContainers = allContainers[:lenBefore-1]
					break
				}
			}
			if lenBefore == len(allContainers) {
				zap.S().Warnf("Stopped container %s, but failed to remove it from the global container scope", container.ContainerID)
			} else {
				zap.S().Infof("Stopped container %s", container.ContainerID)
			}
		}
	}

	zap.S().Infof("Stopped session for user %s", user.ID)
}

// getContainer returns the container for the given user. If the user's
// profile references an existing container id, that container is re-used;
// otherwise a new interactive container is created from the profile's image.
func getContainer(ctx context.Context, client *docker.Client, user *User) (container *docker.InteractiveContainer, ok bool) {
	db := database.GetDatabase()

	var config docker.Config
	// check if the user has a container (id) assigned
	if user.Profile.ContainerID != "" {
		for _, cont := range allContainers {
			if cont.FullContainerID == user.Profile.ContainerID {
				return cont, true
			}
		}

		settings, err := db.SettingsByContainerID(user.Profile.ContainerID)
		if err != nil {
			zap.S().Errorf("Failed to get stored container config for container %s: %v", user.Profile.ContainerID, err)
			fmt.Fprintln(user.Terminal, "Could not connect to saved container")
			return nil, false
		}
		config = docker.Config{
			NetworkMode:        docker.NetworkMode(*settings.NetworkMode),
			Configurable:       *settings.Configurable,
			RunLevel:           docker.RunLevel(*settings.RunLevel),
			StartupInformation: *settings.StartupInformation,
			ExitAfter:          *settings.ExitAfter,
			KeepOnExit:         *settings.KeepOnExit,
		}

		container, err = docker.InteractiveContainerFromID(ctx, client, config, user.Profile.ContainerID)
		if err != nil {
			zap.S().Errorf("Failed to get container from id %s: %v", user.Profile.ContainerID, err)
			fmt.Fprintln(user.Terminal, "Failed to get container")
			return nil, false
		}
		zap.S().Infof("Re-used container %s for user %s", user.Profile.ContainerID, user.ID)
	} else {
		config = docker.Config{
			NetworkMode:        docker.NetworkMode(user.Profile.NetworkMode),
			Configurable:       user.Profile.Configurable,
			RunLevel:           docker.RunLevel(user.Profile.RunLevel),
			StartupInformation: user.Profile.StartupInformation,
			ExitAfter:          user.Profile.ExitAfter,
			KeepOnExit:         user.Profile.KeepOnExit,
		}

		image, out, err := docker.NewImage(ctx, client.Client, user.Profile.Image)
		if err != nil {
			zap.S().Errorf("Failed to get '%s' image for profile %s: %v", user.Profile.Image, user.Profile.Name(), err)
			fmt.Fprintf(user.Terminal, "Failed to get image %s\n", user.Profile.Image)
			return nil, false
		}
		if out != nil {
			if err := utils.DisplayJSONMessagesStream(out, user.Terminal, user.Terminal); err != nil {
				zap.S().Errorf("Failed to fetch '%s' docker image: %v", image.Ref(), err)
				fmt.Fprintf(user.Terminal, "Failed to fetch image %s\n", image.Ref())
				return nil, false
			}
		}

		container, err = docker.NewInteractiveContainer(ctx, client, config, image, strconv.Itoa(int(time.Now().Unix())))
		if err != nil {
			zap.S().Errorf("Failed to create interactive container: %v", err)
			fmt.Fprintln(user.Terminal, "Failed to create interactive container")
			return nil, false
		}
		zap.S().Infof("Created new %s container (%s) for user %s", image.Ref(), container.ContainerID, user.ID)
	}

	// persist the container settings if they aren't stored yet
	if _, err := db.SettingsByContainerID(container.FullContainerID); err != nil {
		if err == sql.ErrNoRows {
			rawNetworkMode := int(config.NetworkMode)
			rawRunLevel := int(config.RunLevel)
			if err := db.SetSettings(container.FullContainerID, database.Settings{
				NetworkMode:        &rawNetworkMode,
				Configurable:       &config.Configurable,
				RunLevel:           &rawRunLevel,
				StartupInformation: &config.StartupInformation,
				ExitAfter:          &config.ExitAfter,
				KeepOnExit:         &config.KeepOnExit,
			}); err != nil {
				zap.S().Errorf("Failed to update settings for container %s for user %s: %v", container.ContainerID, user.ID, err)
				return nil, false
			}
		}
	}

	return container, true
}
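
One detail worth noting in closeAllContainers: the loop re-declares container before launching each goroutine. Under the loop-variable semantics of the Go versions current at the time of this commit (pre-1.22), all iterations share a single variable, so without the copy every goroutine would likely observe only the last container. A self-contained illustration of the pitfall and the fix:

	package main

	import (
		"fmt"
		"sync"
	)

	func main() {
		items := []string{"a", "b", "c"}
		var wg sync.WaitGroup
		for _, item := range items {
			wg.Add(1)
			item := item // copy; before Go 1.22, `item` is one shared variable
			go func() {
				defer wg.Done()
				fmt.Println(item) // prints a, b and c (in some order)
			}()
		}
		wg.Wait()
	}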

server/ssh/handle.go (new file, 79 lines)

@@ -0,0 +1,79 @@
package ssh

import (
	"docker4ssh/docker"
	"fmt"
	"go.uber.org/zap"
	"golang.org/x/crypto/ssh"
)

type RequestType string

const (
	RequestPtyReq       RequestType = "pty-req"
	RequestWindowChange RequestType = "window-change"
)

// PtyReqPayload is the payload of a 'pty-req' request (RFC 4254, section 6.2).
type PtyReqPayload struct {
	Term string

	Width, Height           uint32
	PixelWidth, PixelHeight uint32

	Modes []byte
}

func handleChannels(chans <-chan ssh.NewChannel, client *docker.Client, user *User) {
	for channel := range chans {
		go handleChannel(channel, client, user)
	}
}

func handleChannel(channel ssh.NewChannel, client *docker.Client, user *User) {
	if t := channel.ChannelType(); t != "session" {
		channel.Reject(ssh.UnknownChannelType, fmt.Sprintf("unknown channel type: %s", t))
		return
	}

	conn, requests, err := channel.Accept()
	if err != nil {
		zap.S().Warnf("Failed to accept channel for user %s", user.ID)
		return
	}
	defer conn.Close()

	user.Terminal.ReadWriter = conn

	// handle all other requests besides the normal user input.
	// currently, only 'pty-req' is evaluated; it carries the initial terminal size
	go handleRequest(requests, user)

	// this handles the actual user terminal connection.
	// blocks until the session has finished
	connection(client, user)

	zap.S().Debugf("Session for user %s ended", user.ID)
}

func handleRequest(requests <-chan *ssh.Request, user *User) {
	for request := range requests {
		switch RequestType(request.Type) {
		case RequestPtyReq:
			var ptyReq PtyReqPayload
			ssh.Unmarshal(request.Payload, &ptyReq)
			user.Terminal.Width = ptyReq.Width
			user.Terminal.Height = ptyReq.Height
		case RequestWindowChange:
			// deliberately not logged: a user who constantly resizes their
			// window would otherwise spam the logs
		default:
			zap.S().Debugf("New request from user %s - Type: %s, Want Reply: %t, Payload: '%s'", user.ID, request.Type, request.WantReply, request.Payload)
		}
		if request.WantReply {
			request.Reply(true, nil)
		}
	}
}
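
handleRequest updates the terminal size only from 'pty-req'; the dimensions carried by 'window-change' requests are discarded. If live resizing were wanted, that payload (RFC 4254, section 6.7) could be decoded the same way as the pty-req payload. A self-contained sketch of the wire format, using a hypothetical windowChangePayload type that is not part of this commit:

	package main

	import (
		"fmt"

		"golang.org/x/crypto/ssh"
	)

	// windowChangePayload mirrors RFC 4254, section 6.7: four uint32s, no TERM string.
	type windowChangePayload struct {
		Width, Height           uint32
		PixelWidth, PixelHeight uint32
	}

	func main() {
		// what a client would send when the terminal is resized to 120x40
		wire := ssh.Marshal(windowChangePayload{Width: 120, Height: 40})

		var winch windowChangePayload
		if err := ssh.Unmarshal(wire, &winch); err != nil {
			panic(err)
		}
		fmt.Printf("resize to %dx%d\n", winch.Width, winch.Height) // resize to 120x40
	}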

server/ssh/ssh.go (new file, 190 lines)

@@ -0,0 +1,190 @@
package ssh

import (
	"context"
	"crypto/md5"
	c "docker4ssh/config"
	"docker4ssh/database"
	"docker4ssh/docker"
	"docker4ssh/terminal"
	"encoding/hex"
	"fmt"
	"go.uber.org/zap"
	"golang.org/x/crypto/ssh"
	"net"
	"regexp"
	"strings"
)

var (
	users          = make([]*User, 0)
	profiles       c.Profiles
	dynamicProfile c.Profile
)

type User struct {
	*ssh.ServerConn

	ID string
	IP string

	Profile  *c.Profile
	Terminal *terminal.Terminal

	Container *docker.SimpleContainer
}

// GetUser returns the user whose container has the given ip, or nil if no
// such user exists.
func GetUser(ip string) *User {
	for _, user := range users {
		if container := user.Container; container != nil && container.Network.IP == ip {
			return user
		}
	}
	return nil
}

type extras struct {
	containerID string
}

// StartServing loads the profiles, initializes the docker cli and network and
// starts accepting ssh connections on the configured port. It returns a
// channel for startup errors and a closer that stops all containers and shuts
// the listener down.
func StartServing(config *c.Config, serverConfig *ssh.ServerConfig) (errChan chan error, closer func() error) {
	errChan = make(chan error, 1)

	var err error
	profiles, err = c.LoadProfileDir(config.Profile.Dir, c.DefaultPreProfileFromConfig(config))
	if err != nil {
		errChan <- err
		return
	}
	zap.S().Debugf("Loaded %d profile(s)", len(profiles))

	if config.Profile.Dynamic.Enable {
		dynamicProfile, err = c.DynamicProfileFromConfig(config, c.DefaultPreProfileFromConfig(config))
		if err != nil {
			errChan <- err
			return
		}
		zap.S().Debugf("Loaded dynamic profile")
	}

	cli, err := docker.InitCli()
	if err != nil {
		errChan <- err
		return
	}
	zap.S().Debugf("Initialized docker cli")

	network, err := docker.InitNetwork(context.Background(), cli, config)
	if err != nil {
		errChan <- err
		return
	}
	zap.S().Debugf("Initialized docker networks")

	client := &docker.Client{
		Client:   cli,
		Database: database.GetDatabase(),
		Network:  network,
	}

	listener, err := net.Listen("tcp", fmt.Sprintf(":%d", config.SSH.Port))
	if err != nil {
		errChan <- err
		return
	}
	zap.S().Debugf("Created ssh listener")

	var closed bool
	go func() {
		db := database.GetDatabase()
		for {
			conn, err := listener.Accept()
			if err != nil {
				if closed {
					return
				}
				zap.S().Errorf("Failed to accept new ssh user: %v", err)
				continue
			}

			serverConn, chans, requests, err := ssh.NewServerConn(conn, serverConfig)
			if err != nil {
				zap.S().Errorf("Failed to establish new ssh connection: %v", err)
				continue
			}

			idBytes := md5.Sum([]byte(strings.Split(serverConn.User(), ":")[0]))
			idString := hex.EncodeToString(idBytes[:])

			zap.S().Infof("New ssh connection from %s with %s (%s)", serverConn.RemoteAddr().String(), serverConn.ClientVersion(), idString)

			var profile *c.Profile
			if name, ok := serverConn.Permissions.CriticalOptions["profile"]; ok {
				if name == "dynamic" {
					if image, ok := serverConn.Permissions.CriticalOptions["image"]; ok {
						// copy the dynamic profile so the template's image is
						// not overwritten
						tempDynamicProfile := dynamicProfile
						tempDynamicProfile.Image = image
						profile = &tempDynamicProfile
					}
				}
				if profile == nil {
					if profile, ok = profiles.GetByName(name); !ok {
						zap.S().Errorf("Failed to get profile %s", name)
						continue
					}
				}
			} else if containerID, ok := serverConn.Permissions.CriticalOptions["containerID"]; ok {
				if settings, err := db.SettingsByContainerID(containerID); err == nil {
					profile = &c.Profile{
						NetworkMode:        *settings.NetworkMode,
						Configurable:       *settings.Configurable,
						RunLevel:           *settings.RunLevel,
						StartupInformation: *settings.StartupInformation,
						ExitAfter:          *settings.ExitAfter,
						KeepOnExit:         *settings.KeepOnExit,
						ContainerID:        containerID,
					}
				} else {
					for _, container := range allContainers {
						if container.ContainerID == containerID {
							cconfig := c.GetConfig()
							profile = &c.Profile{
								Password:           regexp.MustCompile(cconfig.Profile.Default.Password),
								NetworkMode:        cconfig.Profile.Default.NetworkMode,
								Configurable:       cconfig.Profile.Default.Configurable,
								RunLevel:           cconfig.Profile.Default.RunLevel,
								StartupInformation: cconfig.Profile.Default.StartupInformation,
								ExitAfter:          cconfig.Profile.Default.ExitAfter,
								KeepOnExit:         cconfig.Profile.Default.KeepOnExit,
								Image:              "",
								ContainerID:        containerID,
							}
							break
						}
					}
				}
			}
			if profile == nil {
				zap.S().Errorf("Failed to resolve a profile for user %s", idString)
				continue
			}
			zap.S().Debugf("User %s has profile %s", idString, profile.Name())

			user := &User{
				ServerConn: serverConn,
				ID:         idString,
				Terminal:   &terminal.Terminal{},
				Profile:    profile,
			}
			users = append(users, user)

			go ssh.DiscardRequests(requests)
			go handleChannels(chans, client, user)
		}
	}()

	return errChan, func() error {
		closed = true
		// close all containers
		closeAllContainers(context.Background())
		// close the listener
		return listener.Close()
	}
}
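
For context, a minimal wiring sketch of how these pieces might be brought up together. The main package below is illustrative, not part of this file; it assumes the config, database and zap logger are initialized beforehand, and the import path "docker4ssh/ssh" is inferred from the imports above:

	package main

	import (
		"log"

		c "docker4ssh/config"
		"docker4ssh/ssh"
	)

	func main() {
		// assumes config, database and logger were set up elsewhere
		cfg := c.GetConfig()

		serverConfig, err := ssh.NewSSHConfig(cfg)
		if err != nil {
			log.Fatalf("failed to build ssh server config: %v", err)
		}

		errChan, closer := ssh.StartServing(cfg, serverConfig)
		defer closer()

		// block until serving fails
		log.Fatal(<-errChan)
	}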
}