Lighttpd SSL Redirect

I have a home server that I want to serve pages via HTTPS only, but I have run into some issues. I have been serving non-secure pages OK and could access them both on the local network and from the web (I'm using ddns.net and have all the port forwarding covered).
I have test certificates properly installed, and at the moment the redirects work fantastically on the local network but NOT from the web. Below are the two redirects I have tested; both work locally but both fail to serve secure pages from the web.
NOTE: I use a non-standard port, i.e. port 1080. However, as mentioned above, non-secure access was all OK, so the port forwarding from my gateway router to the server is (at least I think!) fine. Also, I can only browse to the server when I append the port number to the IP / name, i.e. localhost:1080 or 192.168.1.1:1080 (which is fine by me), hence the host filters in the redirects below.
In this instance, I can access the pages both securely and insecurely from the local network but can NOT access them securely from the web.
$HTTP["scheme"] == "http" {
$HTTP["host"] =~ "^(.*):1080" {
url.redirect = (".*" => "https://%1$0")
}
}
$SERVER["socket"] == ":443" {
ssl.engine = "enable"
ssl.pemfile = Var.Doo + "/server.pem"
ssl.ca-file = Var.Doo + "/ca.pem"
setenv.add-environment = ( "HTTPS" => "on" )
}
After some web research, I added a condition to the redirects to handle URLs without the port appended; however, now I can access the pages neither securely nor insecurely from the web (locally everything still works).
$HTTP["scheme"] == "http" {
$HTTP["host"] =~ "^(.*):1080" {
url.redirect = (".*" => "https://%1$0")
}
else $HTTP["host"] =~ ".*" {
url.redirect = (".*" => "https://%0$0")
}
}
$SERVER["socket"] == ":443" {
ssl.engine = "enable"
ssl.pemfile = Var.Doo + "/server.pem"
ssl.ca-file = Var.Doo + "/ca.pem"
setenv.add-environment = ( "HTTPS" => "on" )
}
EDIT: OK, 20 views & counting and no suggestion of an answer yet ...
I know I stated above that I believe the port forwarding is all good, but now I am having second thoughts on that. Any pointers either way?

OK, I spent some more time looking at this and managed to resolve the issue, which was two-fold.
As latterly suspected, my initial assumption that the port forwarding was OK turned out to be incorrect: I had not forwarded the secure port that lighttpd defaults to, i.e. port 443. So the first part of the solution was completing the port forwarding on my gateway router to include that route.
The second part of the solution is a textually minor change to the redirect code in the configuration file, filtering on the ports rather than on the protocol (the earlier code may also work now, but I have not retested it). Here's the changed and tested code:
$SERVER["socket"] == ":443" {
ssl.engine = "enable"
ssl.pemfile = Var.Doo + "/server.pem"
ssl.ca-file = Var.Doo + "/ca.pem"
setenv.add-environment = ( "HTTPS" => "on" )
}
else $SERVER["socket"] == ":1080" {
$HTTP["host"] =~ "([^:/]+)" {
        url.redirect = ( "^/(.*)" => "https://%1:443/$1" )
    }
}

Prevent nginx from killing idle tcp sockets

I'm trying to use nginx as a reverse proxy for SSL/TCP sockets (so that I can write my custom server as raw TCP, but have nginx handle the SSL certificates). My use case requires that the TCP connections remain alive but go idle (no packets back and forth) for extended periods of time (determined by the client, but as long as an hour). Unfortunately, nginx kills my socket connections after the first 10 minutes (timed to within a second) of inactivity, and I haven't been able to find, either online or in the docs, what actually controls this timeout.
I know that it has to be nginx doing it (not my raw server timing out, or my client's SSL socket), since I can connect directly to the raw TCP server without timeout issues, but if I go through nginx as a raw TCP reverse proxy (no SSL) it still times out.
Here's some code to reproduce the issue; note that I've commented out the SSL-relevant pieces in the nginx config because the timeout occurs either way.
/etc/nginx/modules-enabled/test.conf:
stream {
    upstream tcp-server {
        server localhost:33445;
    }
    server {
        listen 33446;
        # listen 33446 ssl;
        proxy_pass tcp-server;
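        # Not in the original config: the stream module's proxy_timeout
        # directive (default 10m) matches the 10-minute cutoff described above;
        # e.g. "proxy_timeout 2h;" here is the usual knob for keeping idle
        # tunnels open longer.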
        # Certs
        # ssl_certificate /etc/letsencrypt/live/example.com/fullchain.pem;
        # ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem;
    }
}
server.js:
const net = require("net");
const s = net.createServer();
s.on("connection", (sock) => {
    console.log('Got connection from', sock.remoteAddress, sock.remotePort);
    let i; // interval handle (no keepalive interval is started in this repro)
    sock.on("error", (err) => {
        console.error(err);
        clearInterval(i);
    });
    sock.on("close", () => {
        console.log('lost connection from', sock.remoteAddress, sock.remotePort);
        clearInterval(i);
    });
});
s.listen(33445);
client.js:
const net = require('net');
const host = 'example.com';
let use_tls = false;
let client;
let start = Date.now();
// Use me to circumvent nginx, and no timeout occurs
// let port = 33445;
// Use me to go through nginx, and the connection times out after 10 mins of no RX/TX
let port = 33446;
client = new net.Socket();
client.connect({ port, host }, function () {
    console.log('Connected via TCP');
    // Include me, and nginx doesn't kill the socket
    // setInterval(() => { client.write("ping") }, 5000);
});
client.on('end', function () {
    console.log('Disconnected: ' + ((Date.now() - start) / 1000 / 60) + " mins");
});
I've tried various directives in the nginx stream block, but nothing seems to help. Thanks in advance!

How to redirect requests to another host using ZAP?

I'm new to ZAP and I don't know much about its JS/ECMA scripting.
Basically, I am trying to redirect requests to another host.
Say an application that is connected to the ZAP proxy makes a request to a URL:
http://www.somesite.com/path/to/a/file
but I want to change the hostname in the URL to:
www.anothersite.com
so it will actually request: http://www.anothersite.com/path/to/a/file
Here's the code I was trying to get to work, but the URL remains unchanged in the request.
function proxyRequest(msg) {
    // Debugging can be done using print like this
    var uri = msg.getRequestHeader().getURI().toString()
    var host = msg.getRequestHeader().getURI().getHost().toString()
    print('proxyRequest called for url=' + uri)
    if (host == 'download.qt.io') {
        uri = uri.replace('download.qt.io/online/', 'mirrors.ocf.berkeley.edu/qt/online/')
        msg.getRequestHeader().setHeader('Location', uri)
        print('proxyRequest changed to url=' + uri)
    }
    if (host == 'ftp.jaist.ac.jp') {
        uri = uri.replace('ftp.jaist.ac.jp/pub/qtproject/online/', 'mirrors.ocf.berkeley.edu/qt/online/')
        msg.getRequestHeader().setHeader('Location', uri)
        print('proxyRequest changed to url=' + uri)
    }
    if (host == 'qtproject.mirror.liquidtelecom.com') {
        uri = uri.replace('qtproject.mirror.liquidtelecom.com/online/', 'mirrors.ocf.berkeley.edu/qt/online/')
        msg.getRequestHeader().setHeader('Location', uri)
        print('proxyRequest changed to url=' + uri)
    }
    return true
}
Option 1: Replacer Rule
Install the Replacer add-on from the marketplace.
Go to the Tools menu and select 'Replacer Options'.
Set up a rule that matches the original host (www.somesite.com) and replaces it with the new one (www.anothersite.com).
Save/Okay as appropriate.
Now when you browse etc., all your traffic will be redirected/rewritten.
Option 2: HttpSender Script
Create a new HttpSender script, similar to the following example:
function sendingRequest(msg, initiator, helper) {
    var host = msg.getRequestHeader().getURI().getHost();
    if (host.equals("www.somesite.com")) {
        var uri = msg.getRequestHeader().getURI();
        uri.setEscapedAuthority("www.anothersite.com");
        msg.getRequestHeader().setURI(uri);
    }
    return msg;
}
function responseReceived(msg, initiator, helper) {}
Option 3: Hosts File Entry
Goto a command prompt and nslookup www.somesite.com, note the IP address (w.x.y.z).
In your hosts file, add an entry associating the noted IP (w.x.y.z) with www.anothersite.com.
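For example, the resulting hosts-file entry would look like (keeping w.x.y.z as the placeholder for the looked-up address):
w.x.y.z    www.somesite.com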
(You may need to restart ZAP/browsers for this change to take effect. On Linux you'll likely need sudo to edit the file; on Windows you'll need to edit it as an admin user.)
(Further details WRT editing your hosts file: https://www.howtogeek.com/howto/27350/beginner-geek-how-to-edit-your-hosts-file/)

How to set up HTTP and HTTPS on the same port with akka-http

I have a Scala app that runs an akka-http webserver on a custom port, let's say 8000.
Until a while ago, it would only handle http:// requests, but recently I switched to https://.
Some of the clients have the link bookmarked and keep getting a "no connection" error because they try the address with http:// instead of https://, and they keep forgetting why it happens.
I tried binding two services to the same port but failed because only the first one gets bound.
Http().bind(interface = "0.0.0.0", port = Global.settings.restPort, connectionContext = httpsContext)
Http().bind(interface = "0.0.0.0", port = Global.settings.restPort)
All I need from the http:// server is to return a 301 code and redirect to the same address, but with the https scheme.
How can I achieve that?
As others have commented, you can't bind the HTTP and HTTPS servers to the same port. You can have both servers running on separate ports and redirect all HTTP traffic to the HTTPS server using akka-http's scheme() and redirect() directives:
val hostName = "www.example.com"
val portHttp = 8080
val portHttps = 8443

val route =
  scheme("http") {
    extract(_.request.uri) { uri =>
      redirect(
        uri.withScheme("https").withAuthority(hostName, portHttps),
        StatusCodes.MovedPermanently
      )
    }
  } ~
  pathSingleSlash {
    get {
      complete(HttpEntity(ContentTypes.`text/html(UTF-8)`, "Welcome to Akka-HTTP!"))
    }
  }

Http().bindAndHandle(route, hostName, portHttp)
Http().bindAndHandle(route, hostName, portHttps, connectionContext = httpsContext)
Note that there is no need for applying withAuthority() if you're using standard HTTP and HTTPS ports (i.e. 80 and 443).
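Both the question and this answer assume an httpsContext value is already available. For reference, a minimal sketch of building one from a keystore, in the style of the akka-http server-side HTTPS documentation; the keystore file name and password below are placeholders, and ConnectionContext.https(...) is the API that matches the bindAndHandle calls above:
import java.io.FileInputStream
import java.security.{ KeyStore, SecureRandom }
import javax.net.ssl.{ KeyManagerFactory, SSLContext, TrustManagerFactory }
import akka.http.scaladsl.{ ConnectionContext, HttpsConnectionContext }

// Placeholder keystore file and password: substitute your own certificate material.
val password: Array[Char] = "changeit".toCharArray

val keyStore: KeyStore = KeyStore.getInstance("PKCS12")
keyStore.load(new FileInputStream("server.p12"), password)

val keyManagerFactory: KeyManagerFactory = KeyManagerFactory.getInstance("SunX509")
keyManagerFactory.init(keyStore, password)

val trustManagerFactory: TrustManagerFactory = TrustManagerFactory.getInstance("SunX509")
trustManagerFactory.init(keyStore)

val sslContext: SSLContext = SSLContext.getInstance("TLS")
sslContext.init(keyManagerFactory.getKeyManagers, trustManagerFactory.getTrustManagers, new SecureRandom)

val httpsContext: HttpsConnectionContext = ConnectionContext.https(sslContext)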

Is it possible to secure a ColdFusion 11 REST Service with HTTP BASIC Authentication?

I am setting up a simple REST Service in ColdFusion 11. The web server is IIS 8.5 on Windows Server 2012R2.
This REST Service needs to be secured to prevent unauthorized users from accessing/writing data. For the time being, there will be only one authorized user, so I want to keep authentication/authorization as simple as possible. My initial thought is to use HTTP BASIC Authentication.
Here's the setup for the REST Service:
Source Directory: C:\web\site1\remoteapi\
REST path: inventory
To implement this, I configured the source directory of the REST Service in IIS to authorize only one user, disable Anonymous authentication, and enable Basic authentication.
When I call the source directory directly in a browser (i.e. http://site1/remoteapi/inventory.cfc?method=read), I am presented with the Basic authentication dialog.
However, when I attempt to request the REST path (http://site1/rest/inventory/), I am not challenged at all.
How can I implement HTTP BASIC authentication on the REST path?
So, needing to get this done without much delay, I went ahead and, using some principles from Ben Nadel's website, wrote my own authentication into the onRequestStart() method of the REST service's Application.cfc. Here is the basic code; it uses hard-coded values in the VARIABLES scope to validate the username and password and also does not include any actual "authorization" setting:
public boolean function onRequestStart(required string targetPage) {
    LOCAL.Response = SUPER.onRequestStart(ARGUMENTS.targetpage);
    if (!StructKeyExists(GetHTTPRequestData().Headers, "Authorization")) {
        cfheader(
            name="WWW-Authenticate",
            value="Basic realm=""REST API Access"""
        );
        LOCAL.RESTResponse = {
            status = 401,
            content = {Message = "Unauthorized"}
        };
        restSetResponse(LOCAL.RESTResponse);
    }
    else {
        LOCAL.IsAuthenticated = true;
        LOCAL.EncodedCredentials =
            GetToken( GetHTTPRequestData().Headers.Authorization, 2, " " );
        // Credential string is not Base64
        if ( !ArrayLen(
                REMatch(
                    "^([A-Za-z0-9+/]{4})*([A-Za-z0-9+/]{4}|[A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{2}==)$",
                    LOCAL.EncodedCredentials
                )
            )
        ) {
            LOCAL.IsAuthenticated = false;
        }
        else {
            // Convert Base64 to String
            LOCAL.Credentials =
                ToString(ToBinary( LOCAL.EncodedCredentials ));
            LOCAL.Username = GetToken( LOCAL.Credentials, 1, ":" );
            LOCAL.Password = GetToken( LOCAL.Credentials, 2, ":" );
            if ( LOCAL.Username != VARIABLES.CREDENTIALS.Username
                || LOCAL.Password != VARIABLES.CREDENTIALS.Password
            ) {
                LOCAL.IsAuthenticated = false;
            }
        }
        if (!LOCAL.IsAuthenticated) {
            LOCAL.Response = {
                status = 403,
                content = {Message = "Forbidden"}
            };
            restSetResponse(LOCAL.Response);
        }
    }
    return LOCAL.Response;
}
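For reference, the Authorization header this code parses carries "Basic " followed by base64("username:password"); for example, the credentials user / password arrive as:
Authorization: Basic dXNlcjpwYXNzd29yZA==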

Fiddler redirect HTTPS to localhost

I have an iPhone app that connects to an HTTPS service in Azure. I want to redirect the iPhone calls via Fiddler to http://localhost:19703 where I am running the same service on my local machine for debugging purposes. I am able to redirect the HTTPS service to another HTTPS service using the following Fiddler script. However, if I use the same script to redirect to localhost:19703, it does not work. Any ideas?
if (oSession.HTTPMethodIs("CONNECT") && (oSession.PathAndQuery == "XXXX.azurewebsites.net:443"))
{
    oSession["OriginalHostname"] = oSession.hostname;
    oSession.PathAndQuery = "YYYY.azurewebsites.net:443";
}

// If it's an HTTPS tunnel, override the certificate
if (oSession.HTTPMethodIs("CONNECT") && (null != oSession["OriginalHostname"]))
{
    oSession["x-overrideCertCN"] = oSession["OriginalHostname"];
    oSession["X-IgnoreCertCNMismatch"] = "Server's hostname may not match what we're expecting...";
}

oSession.bypassGateway = true;
Try using this approach:
static function OnBeforeRequest(oSession: Fiddler.Session) {
    ...
    if (oSession.HostnameIs("YYYY.azurewebsites.net")) {
        oSession.host = "127.0.0.1:19703";
    }
    ...
}
Complete description of the issue is here.