Explorar el Código

Add new per-client peak metrics

- Fix: Don't count DNS successes in quality metrics when the input is an IP
  address, as that doesn't use the resolver.

- Fix: sshClient.handleTCPChannel split tunnel
  (protocol.CHANNEL_REJECT_REASON_SPLIT_TUNNEL) cases incorrectly continued
  to do an upstream dial.
Rod Hynes hace 4 años
padre
commit
9152fee4b7
Se han modificado 3 ficheros con 334 adiciones y 49 borrados
  1. 14 0
      psiphon/server/config.go
  2. 109 4
      psiphon/server/server_test.go
  3. 211 45
      psiphon/server/tunnelServer.go

+ 14 - 0
psiphon/server/config.go

@@ -59,6 +59,7 @@ const (
 	SSH_PASSWORD_BYTE_LENGTH                            = 32
 	SSH_RSA_HOST_KEY_BITS                               = 2048
 	SSH_OBFUSCATED_KEY_BYTE_LENGTH                      = 32
+	PEAK_UPSTREAM_FAILURE_RATE_MINIMUM_SAMPLE_SIZE      = 10
 	PERIODIC_GARBAGE_COLLECTION                         = 120 * time.Second
 	STOP_ESTABLISH_TUNNELS_ESTABLISHED_CLIENT_THRESHOLD = 20
 	DEFAULT_LOG_FILE_REOPEN_RETRIES                     = 25
@@ -314,6 +315,13 @@ type Config struct {
 	// The default, 0, disables load logging.
 	LoadMonitorPeriodSeconds int
 
+	// PeakUpstreamFailureRateMinimumSampleSize specifies the minimum number
+	// of samples (e.g., upstream port forward attempts) that are required
+	// before taking a failure rate snapshot which may be recorded as
+	// peak_dns_failure_rate/peak_tcp_port_forward_failure_rate. The default
+	// is PEAK_UPSTREAM_FAILURE_RATE_MINIMUM_SAMPLE_SIZE.
+	PeakUpstreamFailureRateMinimumSampleSize *int
+
 	// ProcessProfileOutputDirectory is the path of a directory to which
 	// process profiles will be written when signaled with SIGUSR2. The
 	// files are overwritten on each invocation. When set to the default
@@ -424,6 +432,7 @@ type Config struct {
 
 	sshBeginHandshakeTimeout                       time.Duration
 	sshHandshakeTimeout                            time.Duration
+	peakUpstreamFailureRateMinimumSampleSize       int
 	periodicGarbageCollection                      time.Duration
 	stopEstablishTunnelsEstablishedClientThreshold int
 	dumpProfilesOnStopEstablishTunnelsDone         int32
@@ -623,6 +632,11 @@ func LoadConfig(configJSON []byte) (*Config, error) {
 		}
 	}
 
+	config.peakUpstreamFailureRateMinimumSampleSize = PEAK_UPSTREAM_FAILURE_RATE_MINIMUM_SAMPLE_SIZE
+	if config.PeakUpstreamFailureRateMinimumSampleSize != nil {
+		config.peakUpstreamFailureRateMinimumSampleSize = *config.PeakUpstreamFailureRateMinimumSampleSize
+	}
+
 	config.periodicGarbageCollection = PERIODIC_GARBAGE_COLLECTION
 	if config.PeriodicGarbageCollectionSeconds != nil {
 		config.periodicGarbageCollection = time.Duration(*config.PeriodicGarbageCollectionSeconds) * time.Second

+ 109 - 4
psiphon/server/server_test.go

@@ -849,6 +849,9 @@ func runServer(t *testing.T, runConfig *runServerConfig) {
 	// TODO: test that the concurrency limit is correctly enforced.
 	serverConfig["MaxConcurrentSSHHandshakes"] = 1
 
+	// Ensure peak failure rate log fields are emitted even for a single
+	// port forward attempt.
+	serverConfig["PeakUpstreamFailureRateMinimumSampleSize"] = 1
+
 	// Exercise this option.
 	serverConfig["PeriodicGarbageCollectionSeconds"] = 1
 
@@ -1350,6 +1353,15 @@ func runServer(t *testing.T, runConfig *runServerConfig) {
 		}
 	}
 
+	// Trigger server_load logging once more, to exercise
+	// sshClient.peakMetrics. As we don't have a reference to the server's
+	// Support struct, we can't invoke logServerLoad directly and there's a
+	// potential race between asynchronous logServerLoad invocation and
+	// client shutdown. For now, we sleep as a workaround.
+
+	p.Signal(syscall.SIGUSR2)
+	time.Sleep(1 * time.Second)
+
 	// Shutdown to ensure logs/notices are flushed
 
 	stopClient()
@@ -1482,6 +1494,11 @@ func checkExpectedServerTunnelLogFields(
 		"established_tunnels_count",
 		"network_latency_multiplier",
 		"network_type",
+
+		// The test run ensures that logServerLoad is invoked while the client
+		// is connected, so the following must be logged.
+		"peak_concurrent_proximate_accepted_clients",
+		"peak_concurrent_proximate_established_clients",
 	} {
 		if fields[name] == nil || fmt.Sprintf("%s", fields[name]) == "" {
 			return fmt.Errorf("missing expected field '%s'", name)
@@ -1506,7 +1523,99 @@ func checkExpectedServerTunnelLogFields(
 		}
 	}
 
+	if fields["network_type"].(string) != testNetworkType {
+		return fmt.Errorf("unexpected network_type '%s'", fields["network_type"])
+	}
+
+	// With interruptions, timeouts, and retries in some tests, there may be
+	// more than one dangling accepted_client.
+
+	peakConcurrentProximateAcceptedClients :=
+		int(fields["peak_concurrent_proximate_accepted_clients"].(float64))
+	if peakConcurrentProximateAcceptedClients < 0 ||
+		peakConcurrentProximateAcceptedClients > 10 {
+		return fmt.Errorf(
+			"unexpected peak_concurrent_proximate_accepted_clients '%v'",
+			fields["peak_concurrent_proximate_accepted_clients"])
+	}
+
+	peakConcurrentProximateEstablishedClients :=
+		int(fields["peak_concurrent_proximate_established_clients"].(float64))
+	if peakConcurrentProximateEstablishedClients != 0 {
+		return fmt.Errorf(
+			"unexpected peak_concurrent_proximate_established_clients '%v'",
+			fields["peak_concurrent_proximate_established_clients"])
+	}
+
+	// In some negative test cases, no port forwards are attempted, in which
+	// case these fields are not logged.
+
+	if expectTCPDataTransfer {
+
+		if fields["peak_tcp_port_forward_failure_rate"] == nil {
+			return fmt.Errorf("missing expected field 'peak_tcp_port_forward_failure_rate'")
+		}
+		if fields["peak_tcp_port_forward_failure_rate"].(float64) != 0.0 {
+			return fmt.Errorf(
+				"unexpected peak_tcp_port_forward_failure_rate '%v'",
+				fields["peak_tcp_port_forward_failure_rate"])
+		}
+
+		if fields["peak_tcp_port_forward_failure_rate_sample_size"] == nil {
+			return fmt.Errorf("missing expected field 'peak_tcp_port_forward_failure_rate_sample_size'")
+		}
+		if fields["peak_tcp_port_forward_failure_rate_sample_size"].(float64) <= 0.0 {
+			return fmt.Errorf(
+				"unexpected peak_tcp_port_forward_failure_rate_sample_size '%v'",
+				fields["peak_tcp_port_forward_failure_rate_sample_size"])
+		}
+
+	} else {
+
+		if fields["peak_tcp_port_forward_failure_rate"] != nil {
+			return fmt.Errorf("unexpected field 'peak_tcp_port_forward_failure_rate'")
+		}
+
+		if fields["peak_tcp_port_forward_failure_rate_sample_size"] != nil {
+			return fmt.Errorf("unexpected field 'peak_tcp_port_forward_failure_rate_sample_size'")
+		}
+	}
+
+	if expectUDPDataTransfer {
+
+		if fields["peak_dns_failure_rate"] == nil {
+			return fmt.Errorf("missing expected field 'peak_dns_failure_rate'")
+		}
+		if fields["peak_dns_failure_rate"].(float64) != 0.0 {
+			return fmt.Errorf(
+				"unexpected peak_dns_failure_rate '%v'", fields["peak_dns_failure_rate"])
+		}
+
+		if fields["peak_dns_failure_rate_sample_size"] == nil {
+			return fmt.Errorf("missing expected field 'peak_dns_failure_rate_sample_size'")
+		}
+		if fields["peak_dns_failure_rate_sample_size"].(float64) <= 0.0 {
+			return fmt.Errorf(
+				"unexpected peak_dns_failure_rate_sample_size '%v'",
+				fields["peak_dns_failure_rate_sample_size"])
+		}
+
+	} else {
+
+		if fields["peak_dns_failure_rate"] != nil {
+			return fmt.Errorf("unexpected field 'peak_dns_failure_rate'")
+		}
+
+		if fields["peak_dns_failure_rate_sample_size"] != nil {
+			return fmt.Errorf("unexpected field 'peak_dns_failure_rate_sample_size'")
+		}
+	}
+
+	// TODO: the following cases should check that fields are not logged when
+	// not expected.
+
 	if runConfig.doSplitTunnel {
+
 		if fields["split_tunnel"] == nil {
 			return fmt.Errorf("missing expected field 'split_tunnel'")
 		}
@@ -1694,10 +1803,6 @@ func checkExpectedServerTunnelLogFields(
 		}
 	}
 
-	if fields["network_type"].(string) != testNetworkType {
-		return fmt.Errorf("unexpected network_type '%s'", fields["network_type"])
-	}
-
 	var checkTCPMetric func(float64) bool
 	if expectTCPPortForwardDial {
 		checkTCPMetric = func(f float64) bool { return f > 0 }

+ 211 - 45
psiphon/server/tunnelServer.go

@@ -894,8 +894,12 @@ func (sshServer *sshServer) getLoadStats() (
 
 		// Every client.qualityMetrics DNS map has an "ALL" entry.
 
+		totalDNSCount := int64(0)
+		totalDNSFailedCount := int64(0)
+
 		for key, value := range client.qualityMetrics.DNSCount {
 			upstreamStats["dns_count"].(map[string]int64)[key] += value
+			totalDNSCount += value
 		}
 
 		for key, value := range client.qualityMetrics.DNSDuration {
@@ -904,17 +908,144 @@ func (sshServer *sshServer) getLoadStats() (
 
 		for key, value := range client.qualityMetrics.DNSFailedCount {
 			upstreamStats["dns_failed_count"].(map[string]int64)[key] += value
+			totalDNSFailedCount += value
 		}
 
 		for key, value := range client.qualityMetrics.DNSFailedDuration {
 			upstreamStats["dns_failed_duration"].(map[string]int64)[key] += int64(value / time.Millisecond)
 		}
 
+		// Update client peak failure rate metrics, to be recorded in
+		// server_tunnel.
+		//
+		// Limitations:
+		//
+		// - This is a simple data sampling that doesn't require additional
+		//   timers or tracking logic. Since the rates are calculated on
+		//   getLoadStats events and using accumulated counts, these peaks
+		//   only represent the highest failure rate within a
+		//   Config.LoadMonitorPeriodSeconds non-sliding window. There is no
+		//   sample recorded for short tunnels with no overlapping
+		//   getLoadStats event.
+		//
+		// - There is no minimum sample window, as a getLoadStats event may
+		//   occur immediately after a client first connects. This may be
+		//   compensated for by adjusting
+		//   Config.PeakUpstreamFailureRateMinimumSampleSize, so as to only
+		//   consider failure rates with a larger number of samples.
+		//
+		// - Non-UDP "failures" are not currently tracked.
+
+		minimumSampleSize := int64(sshServer.support.Config.peakUpstreamFailureRateMinimumSampleSize)
+
+		sampleSize := client.qualityMetrics.TCPPortForwardDialedCount +
+			client.qualityMetrics.TCPPortForwardFailedCount
+
+		if sampleSize >= minimumSampleSize {
+
+			TCPPortForwardFailureRate := float64(client.qualityMetrics.TCPPortForwardFailedCount) /
+				float64(sampleSize)
+
+			if client.peakMetrics.TCPPortForwardFailureRate == nil {
+
+				client.peakMetrics.TCPPortForwardFailureRate = new(float64)
+				*client.peakMetrics.TCPPortForwardFailureRate = TCPPortForwardFailureRate
+				client.peakMetrics.TCPPortForwardFailureRateSampleSize = new(int64)
+				*client.peakMetrics.TCPPortForwardFailureRateSampleSize = sampleSize
+
+			} else if *client.peakMetrics.TCPPortForwardFailureRate < TCPPortForwardFailureRate {
+
+				*client.peakMetrics.TCPPortForwardFailureRate = TCPPortForwardFailureRate
+				*client.peakMetrics.TCPPortForwardFailureRateSampleSize = sampleSize
+			}
+		}
+
+		sampleSize = totalDNSCount + totalDNSFailedCount
+
+		if sampleSize >= minimumSampleSize {
+
+			DNSFailureRate := float64(totalDNSFailedCount) / float64(sampleSize)
+
+			if client.peakMetrics.DNSFailureRate == nil {
+
+				client.peakMetrics.DNSFailureRate = new(float64)
+				*client.peakMetrics.DNSFailureRate = DNSFailureRate
+				client.peakMetrics.DNSFailureRateSampleSize = new(int64)
+				*client.peakMetrics.DNSFailureRateSampleSize = sampleSize
+
+			} else if *client.peakMetrics.DNSFailureRate < DNSFailureRate {
+
+				*client.peakMetrics.DNSFailureRate = DNSFailureRate
+				*client.peakMetrics.DNSFailureRateSampleSize = sampleSize
+			}
+		}
+
+		// Reset quality metrics counters
+
 		client.qualityMetrics.reset()
 
 		client.Unlock()
 	}
 
+	for _, client := range sshServer.clients {
+
+		client.Lock()
+
+		// Update client peak proximate (same region) concurrently connected
+		// (other clients) client metrics, to be recorded in server_tunnel.
+		// This operation requires a second loop over sshServer.clients since
+		// established_clients is calculated in the first loop.
+		//
+		// Limitations:
+		//
+		// - This is an approximation, not a true peak, as it only samples
+		//   data every Config.LoadMonitorPeriodSeconds period. There is no
+		//   sample recorded for short tunnels with no overlapping
+		//   getLoadStats event.
+		//
+		// - The "-1" calculation counts all but the current client as other
+		//   clients; it can be the case that the same client has a dangling
+		//   accepted connection that has yet to time-out server side. Due to
+		//   NAT, we can't determine if the client is the same based on
+		//   network address. For established clients,
+		//   registerEstablishedClient ensures that any previous connection
+		//   is first terminated, although this is only for the same
+		//   session_id. Concurrent proximate clients may be considered an
+		//   exact number of other _network connections_, even from the same
+		//   client.
+
+		region := client.geoIPData.Country
+		stats := regionStats[region]["ALL"]
+
+		n := stats["accepted_clients"].(int64) - 1
+		if n >= 0 {
+			if client.peakMetrics.concurrentProximateAcceptedClients == nil {
+
+				client.peakMetrics.concurrentProximateAcceptedClients = new(int64)
+				*client.peakMetrics.concurrentProximateAcceptedClients = n
+
+			} else if *client.peakMetrics.concurrentProximateAcceptedClients < n {
+
+				*client.peakMetrics.concurrentProximateAcceptedClients = n
+			}
+		}
+
+		n = stats["established_clients"].(int64) - 1
+		if n >= 0 {
+			if client.peakMetrics.concurrentProximateEstablishedClients == nil {
+
+				client.peakMetrics.concurrentProximateEstablishedClients = new(int64)
+				*client.peakMetrics.concurrentProximateEstablishedClients = n
+
+			} else if *client.peakMetrics.concurrentProximateEstablishedClients < n {
+
+				*client.peakMetrics.concurrentProximateEstablishedClients = n
+			}
+		}
+
+		client.Unlock()
+	}
+
 	return upstreamStats, protocolStats, regionStats
 }
 
@@ -1280,6 +1411,7 @@ type sshClient struct {
 	postHandshakeRandomStreamMetrics     randomStreamMetrics
 	sendAlertRequests                    chan protocol.AlertRequest
 	sentAlertRequests                    map[string]bool
+	peakMetrics                          peakMetrics
 }
 
 type trafficState struct {
@@ -1294,11 +1426,20 @@ type trafficState struct {
 }
 
 type randomStreamMetrics struct {
-	count                 int
-	upstreamBytes         int
-	receivedUpstreamBytes int
-	downstreamBytes       int
-	sentDownstreamBytes   int
+	count                 int64
+	upstreamBytes         int64
+	receivedUpstreamBytes int64
+	downstreamBytes       int64
+	sentDownstreamBytes   int64
+}
+
+type peakMetrics struct {
+	concurrentProximateAcceptedClients    *int64
+	concurrentProximateEstablishedClients *int64
+	TCPPortForwardFailureRate             *float64
+	TCPPortForwardFailureRateSampleSize   *int64
+	DNSFailureRate                        *float64
+	DNSFailureRateSampleSize              *int64
 }
 
 // qualityMetrics records upstream TCP dial attempts and
@@ -2433,10 +2574,10 @@ func (sshClient *sshClient) handleNewRandomStreamChannel(
 		upstream.Wait()
 
 		sshClient.Lock()
-		metrics.upstreamBytes += request.UpstreamBytes
-		metrics.receivedUpstreamBytes += received
-		metrics.downstreamBytes += request.DownstreamBytes
-		metrics.sentDownstreamBytes += sent
+		metrics.upstreamBytes += int64(request.UpstreamBytes)
+		metrics.receivedUpstreamBytes += int64(received)
+		metrics.downstreamBytes += int64(request.DownstreamBytes)
+		metrics.sentDownstreamBytes += int64(sent)
 		sshClient.Unlock()
 
 		channel.Close()
@@ -2709,6 +2850,23 @@ func (sshClient *sshClient) logTunnel(additionalMetrics []LogFields) {
 	logFields["random_stream_downstream_bytes"] = sshClient.postHandshakeRandomStreamMetrics.downstreamBytes
 	logFields["random_stream_sent_downstream_bytes"] = sshClient.postHandshakeRandomStreamMetrics.sentDownstreamBytes
 
+	// Only log fields for peakMetrics when there is data recorded, otherwise
+	// omit the field.
+	if sshClient.peakMetrics.concurrentProximateAcceptedClients != nil {
+		logFields["peak_concurrent_proximate_accepted_clients"] = *sshClient.peakMetrics.concurrentProximateAcceptedClients
+	}
+	if sshClient.peakMetrics.concurrentProximateEstablishedClients != nil {
+		logFields["peak_concurrent_proximate_established_clients"] = *sshClient.peakMetrics.concurrentProximateEstablishedClients
+	}
+	if sshClient.peakMetrics.TCPPortForwardFailureRate != nil && sshClient.peakMetrics.TCPPortForwardFailureRateSampleSize != nil {
+		logFields["peak_tcp_port_forward_failure_rate"] = *sshClient.peakMetrics.TCPPortForwardFailureRate
+		logFields["peak_tcp_port_forward_failure_rate_sample_size"] = *sshClient.peakMetrics.TCPPortForwardFailureRateSampleSize
+	}
+	if sshClient.peakMetrics.DNSFailureRate != nil && sshClient.peakMetrics.DNSFailureRateSampleSize != nil {
+		logFields["peak_dns_failure_rate"] = *sshClient.peakMetrics.DNSFailureRate
+		logFields["peak_dns_failure_rate_sample_size"] = *sshClient.peakMetrics.DNSFailureRateSampleSize
+	}
+
 	// Pre-calculate a total-tunneled-bytes field. This total is used
 	// extensively in analytics and is more performant when pre-calculated.
 	logFields["bytes"] = sshClient.tcpTrafficState.bytesUp +
@@ -3791,53 +3949,59 @@ func (sshClient *sshClient) handleTCPChannel(
 
 	dialStartTime := time.Now()
 
-	log.WithTraceFields(LogFields{"hostToConnect": hostToConnect}).Debug("resolving")
+	IP := net.ParseIP(hostToConnect)
 
-	ctx, cancelCtx := context.WithTimeout(sshClient.runCtx, remainingDialTimeout)
-	IPs, err := (&net.Resolver{}).LookupIPAddr(ctx, hostToConnect)
-	cancelCtx() // "must be called or the new context will remain live until its parent context is cancelled"
+	if IP == nil {
 
-	resolveElapsedTime := time.Since(dialStartTime)
+		// Resolve the hostname
 
-	// Record DNS metrics. If LookupIPAddr returns net.DNSError.IsNotFound, this
-	// is "no such host" and not a DNS failure. Limitation: the resolver IP is
-	// not known.
+		log.WithTraceFields(LogFields{"hostToConnect": hostToConnect}).Debug("resolving")
 
-	dnsErr, ok := err.(*net.DNSError)
-	dnsNotFound := ok && dnsErr.IsNotFound
-	dnsSuccess := err == nil || dnsNotFound
-	sshClient.updateQualityMetricsWithDNSResult(dnsSuccess, resolveElapsedTime, nil)
+		ctx, cancelCtx := context.WithTimeout(sshClient.runCtx, remainingDialTimeout)
+		IPs, err := (&net.Resolver{}).LookupIPAddr(ctx, hostToConnect)
+		cancelCtx() // "must be called or the new context will remain live until its parent context is cancelled"
 
-	// IPv4 is preferred in case the host has limited IPv6 routing. IPv6 is
-	// selected and attempted only when there's no IPv4 option.
-	// TODO: shuffle list to try other IPs?
+		resolveElapsedTime := time.Since(dialStartTime)
 
-	var IP net.IP
-	for _, ip := range IPs {
-		if ip.IP.To4() != nil {
-			IP = ip.IP
-			break
+		// Record DNS metrics. If LookupIPAddr returns net.DNSError.IsNotFound, this
+		// is "no such host" and not a DNS failure. Limitation: the resolver IP is
+		// not known.
+
+		dnsErr, ok := err.(*net.DNSError)
+		dnsNotFound := ok && dnsErr.IsNotFound
+		dnsSuccess := err == nil || dnsNotFound
+		sshClient.updateQualityMetricsWithDNSResult(dnsSuccess, resolveElapsedTime, nil)
+
+		// IPv4 is preferred in case the host has limited IPv6 routing. IPv6 is
+		// selected and attempted only when there's no IPv4 option.
+		// TODO: shuffle list to try other IPs?
+
+		for _, ip := range IPs {
+			if ip.IP.To4() != nil {
+				IP = ip.IP
+				break
+			}
+		}
+		if IP == nil && len(IPs) > 0 {
+			// If there are no IPv4 IPs, the first IP is IPv6.
+			IP = IPs[0].IP
 		}
-	}
-	if IP == nil && len(IPs) > 0 {
-		// If there are no IPv4 IPs, the first IP is IPv6.
-		IP = IPs[0].IP
-	}
 
-	if err == nil && IP == nil {
-		err = std_errors.New("no IP address")
-	}
+		if err == nil && IP == nil {
+			err = std_errors.New("no IP address")
+		}
 
-	if err != nil {
+		if err != nil {
 
-		// Record a port forward failure
-		sshClient.updateQualityMetricsWithDialResult(false, resolveElapsedTime, IP)
+			// Record a port forward failure
+			sshClient.updateQualityMetricsWithDialResult(false, resolveElapsedTime, IP)
 
-		sshClient.rejectNewChannel(newChannel, fmt.Sprintf("LookupIP failed: %s", err))
-		return
-	}
+			sshClient.rejectNewChannel(newChannel, fmt.Sprintf("LookupIP failed: %s", err))
+			return
+		}
 
-	remainingDialTimeout -= resolveElapsedTime
+		remainingDialTimeout -= resolveElapsedTime
+	}
 
 	if remainingDialTimeout <= 0 {
 		sshClient.rejectNewChannel(newChannel, "TCP port forward timed out resolving")
@@ -3883,9 +4047,11 @@ func (sshClient *sshClient) handleTCPChannel(
 			if !sshClient.isIPPermitted(IP) {
 				// Note: not recording a port forward failure in this case
 				sshClient.rejectNewChannel(newChannel, "port forward not permitted")
+				return
 			}
 
 			newChannel.Reject(protocol.CHANNEL_REJECT_REASON_SPLIT_TUNNEL, "")
+			return
 		}
 	}
 
@@ -3907,7 +4073,7 @@ func (sshClient *sshClient) handleTCPChannel(
 
 	log.WithTraceFields(LogFields{"remoteAddr": remoteAddr}).Debug("dialing")
 
-	ctx, cancelCtx = context.WithTimeout(sshClient.runCtx, remainingDialTimeout)
+	ctx, cancelCtx := context.WithTimeout(sshClient.runCtx, remainingDialTimeout)
 	fwdConn, err := (&net.Dialer{}).DialContext(ctx, "tcp", remoteAddr)
 	cancelCtx() // "must be called or the new context will remain live until its parent context is cancelled"