The code powering m.abunchtell.com https://m.abunchtell.com
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.
 
 
 
 

387 lines
11 KiB

  1. import os from 'os';
  2. import cluster from 'cluster';
  3. import dotenv from 'dotenv'
  4. import express from 'express'
  5. import http from 'http'
  6. import redis from 'redis'
  7. import pg from 'pg'
  8. import log from 'npmlog'
  9. import url from 'url'
  10. import WebSocket from 'uws'
  11. import uuid from 'uuid'
// Resolve the runtime environment and load the matching dotenv file
// (.env.production in production, .env otherwise).
const env = process.env.NODE_ENV || 'development'

dotenv.config({
  path: env === 'production' ? '.env.production' : '.env'
})
  16. const dbUrlToConfig = (dbUrl) => {
  17. if (!dbUrl) {
  18. return {}
  19. }
  20. const params = url.parse(dbUrl)
  21. const config = {}
  22. if (params.auth) {
  23. [config.user, config.password] = params.auth.split(':')
  24. }
  25. if (params.hostname) {
  26. config.host = params.hostname
  27. }
  28. if (params.port) {
  29. config.port = params.port
  30. }
  31. if (params.pathname) {
  32. config.database = params.params.pathname.split('/')[1]
  33. }
  34. const ssl = params.query && params.query.ssl
  35. if (ssl) {
  36. config.ssl = ssl === 'true' || ssl === '1'
  37. }
  38. return config
  39. }
// --- Cluster master: fork and supervise the worker processes ---
if (cluster.isMaster) {
  // Worker count: explicit STREAMING_CLUSTER_NUM override, otherwise 1 in
  // development or (CPU count - 1, minimum 1) in production.
  const core = +process.env.STREAMING_CLUSTER_NUM || (env === 'development' ? 1 : Math.max(os.cpus().length - 1, 1))

  // Fork one worker and re-fork whenever it dies, keeping the pool at the
  // configured size.
  const fork = () => {
    const worker = cluster.fork();

    worker.on('exit', (code, signal) => {
      log.error(`Worker died with exit code ${code}, signal ${signal} received.`);
      // Defer the respawn to the next turn of the event loop.
      setTimeout(() => fork(), 0);
    });
  };

  for (let i = 0; i < core; i++) fork();

  log.info(`Starting streaming API server master with ${core} workers`)
} else {
  53. // Cluster worker
  54. const pgConfigs = {
  55. development: {
  56. database: 'mastodon_development',
  57. host: '/var/run/postgresql',
  58. max: 10
  59. },
  60. production: {
  61. user: process.env.DB_USER || 'mastodon',
  62. password: process.env.DB_PASS || '',
  63. database: process.env.DB_NAME || 'mastodon_production',
  64. host: process.env.DB_HOST || 'localhost',
  65. port: process.env.DB_PORT || 5432,
  66. max: 10
  67. }
  68. }
const app = express()
// Merge the per-environment defaults with any DATABASE_URL overrides.
const pgPool = new pg.Pool(Object.assign(pgConfigs[env], dbUrlToConfig(process.env.DATABASE_URL)))
const server = http.createServer(app)
// The WebSocket endpoint shares the same HTTP server and port.
const wss = new WebSocket.Server({ server })

// Optional key namespace so several apps can share one Redis instance.
const redisNamespace = process.env.REDIS_NAMESPACE || null

const redisParams = {
  host: process.env.REDIS_HOST || '127.0.0.1',
  port: process.env.REDIS_PORT || 6379,
  password: process.env.REDIS_PASSWORD,
  url: process.env.REDIS_URL || null
}

if (redisNamespace) {
  redisParams.namespace = redisNamespace
}

// Prefix applied to every channel name below when a namespace is set.
const redisPrefix = redisNamespace ? `${redisNamespace}:` : ''

const redisClient = redis.createClient(redisParams)
// Channel name -> array of listener callbacks currently streaming it.
const subs = {}

// Fan every Redis pattern message out to the listeners registered for its
// concrete channel.
redisClient.on('pmessage', (_, channel, message) => {
  const callbacks = subs[channel]

  log.silly(`New message on channel ${channel}`)

  if (!callbacks) {
    return
  }

  callbacks.forEach(callback => callback(message))
})

// One pattern subscription covers every timeline channel.
redisClient.psubscribe(`${redisPrefix}timeline:*`)
  95. const subscribe = (channel, callback) => {
  96. log.silly(`Adding listener for ${channel}`)
  97. subs[channel] = subs[channel] || []
  98. subs[channel].push(callback)
  99. }
  100. const unsubscribe = (channel, callback) => {
  101. log.silly(`Removing listener for ${channel}`)
  102. subs[channel] = subs[channel].filter(item => item !== callback)
  103. }
  104. const allowCrossDomain = (req, res, next) => {
  105. res.header('Access-Control-Allow-Origin', '*')
  106. res.header('Access-Control-Allow-Headers', 'Authorization, Accept, Cache-Control')
  107. res.header('Access-Control-Allow-Methods', 'GET, OPTIONS')
  108. next()
  109. }
  110. const setRequestId = (req, res, next) => {
  111. req.requestId = uuid.v4()
  112. res.header('X-Request-Id', req.requestId)
  113. next()
  114. }
// Resolve an OAuth access token to an account id, storing it on
// req.accountId. Calls next() on success, next(err) on failure
// (statusCode 401 when the token is unknown).
const accountFromToken = (token, req, next) => {
  pgPool.connect((err, client, done) => {
    if (err) {
      next(err)
      return
    }

    client.query('SELECT oauth_access_tokens.resource_owner_id, users.account_id FROM oauth_access_tokens INNER JOIN users ON oauth_access_tokens.resource_owner_id = users.id WHERE oauth_access_tokens.token = $1 LIMIT 1', [token], (err, result) => {
      // Release the pooled client before inspecting the result.
      done()

      if (err) {
        next(err)
        return
      }

      if (result.rows.length === 0) {
        err = new Error('Invalid access token')
        err.statusCode = 401
        next(err)
        return
      }

      req.accountId = result.rows[0].account_id
      next()
    })
  })
}
  138. const authenticationMiddleware = (req, res, next) => {
  139. if (req.method === 'OPTIONS') {
  140. next()
  141. return
  142. }
  143. const authorization = req.get('Authorization')
  144. if (!authorization) {
  145. const err = new Error('Missing access token')
  146. err.statusCode = 401
  147. next(err)
  148. return
  149. }
  150. const token = authorization.replace(/^Bearer /, '')
  151. accountFromToken(token, req, next)
  152. }
  153. const errorMiddleware = (err, req, res, next) => {
  154. log.error(req.requestId, err)
  155. res.writeHead(err.statusCode || 500, { 'Content-Type': 'application/json' })
  156. res.end(JSON.stringify({ error: err.statusCode ? `${err}` : 'An unexpected error occurred' }))
  157. }
  158. const placeholders = (arr, shift = 0) => arr.map((_, i) => `$${i + 1 + shift}`).join(', ');
// Core streaming loop: subscribe to the Redis channel `id` and forward each
// event to `output`. When needsFiltering is set (public/hashtag timelines),
// statuses involving accounts the viewer blocks or mutes are dropped.
const streamFrom = (id, req, output, attachCloseHandler, needsFiltering = false) => {
  log.verbose(req.requestId, `Starting stream from ${id} for ${req.accountId}`)

  const listener = message => {
    const { event, payload, queued_at } = JSON.parse(message)

    const transmit = () => {
      const now = new Date().getTime()
      // Age of the message since it was queued on the Redis side.
      const delta = now - queued_at;

      log.silly(req.requestId, `Transmitting for ${req.accountId}: ${event} ${payload} Delay: ${delta}ms`)
      output(event, payload)
    }

    // Only messages that may require filtering are statuses, since notifications
    // are already personalized and deletes do not matter
    if (needsFiltering && event === 'update') {
      pgPool.connect((err, client, done) => {
        if (err) {
          log.error(err)
          return
        }

        const unpackedPayload = JSON.parse(payload)
        // Accounts involved in the status: author, mentioned accounts, and
        // (for boosts) the original author.
        const targetAccountIds = [unpackedPayload.account.id].concat(unpackedPayload.mentions.map(item => item.id)).concat(unpackedPayload.reblog ? [unpackedPayload.reblog.account.id] : [])

        client.query(`SELECT target_account_id FROM blocks WHERE account_id = $1 AND target_account_id IN (${placeholders(targetAccountIds, 1)}) UNION SELECT target_account_id FROM mutes WHERE account_id = $1 AND target_account_id IN (${placeholders(targetAccountIds, 1)})`, [req.accountId].concat(targetAccountIds), (err, result) => {
          done()

          if (err) {
            log.error(err)
            return
          }

          // Any row means the viewer blocks/mutes someone involved: drop it.
          if (result.rows.length > 0) {
            return
          }

          transmit()
        })
      })
    } else {
      transmit()
    }
  }

  subscribe(`${redisPrefix}${id}`, listener)
  // Give the caller a chance to unsubscribe when the client disconnects.
  attachCloseHandler(`${redisPrefix}${id}`, listener)
}
  198. // Setup stream output to HTTP
  199. const streamToHttp = (req, res) => {
  200. res.setHeader('Content-Type', 'text/event-stream')
  201. res.setHeader('Transfer-Encoding', 'chunked')
  202. const heartbeat = setInterval(() => res.write(':thump\n'), 15000)
  203. req.on('close', () => {
  204. log.verbose(req.requestId, `Ending stream for ${req.accountId}`)
  205. clearInterval(heartbeat)
  206. })
  207. return (event, payload) => {
  208. res.write(`event: ${event}\n`)
  209. res.write(`data: ${payload}\n\n`)
  210. }
  211. }
  212. // Setup stream end for HTTP
  213. const streamHttpEnd = req => (id, listener) => {
  214. req.on('close', () => {
  215. unsubscribe(id, listener)
  216. })
  217. }
  218. // Setup stream output to WebSockets
  219. const streamToWs = (req, ws) => {
  220. const heartbeat = setInterval(() => {
  221. // TODO: Can't add multiple listeners, due to the limitation of uws.
  222. if (ws.readyState !== ws.OPEN) {
  223. log.verbose(req.requestId, `Ending stream for ${req.accountId}`)
  224. clearInterval(heartbeat)
  225. return
  226. }
  227. ws.ping()
  228. }, 15000)
  229. return (event, payload) => {
  230. if (ws.readyState !== ws.OPEN) {
  231. log.error(req.requestId, 'Tried writing to closed socket')
  232. return
  233. }
  234. ws.send(JSON.stringify({ event, payload }))
  235. }
  236. }
  237. // Setup stream end for WebSockets
  238. const streamWsEnd = ws => (id, listener) => {
  239. ws.on('close', () => {
  240. unsubscribe(id, listener)
  241. })
  242. ws.on('error', e => {
  243. unsubscribe(id, listener)
  244. })
  245. }
app.use(setRequestId)
app.use(allowCrossDomain)
app.use(authenticationMiddleware)
app.use(errorMiddleware)

// Server-sent-events endpoints. Public and hashtag timelines need
// block/mute filtering (last argument true); the personal timeline is
// already personalized server-side.
app.get('/api/v1/streaming/user', (req, res) => {
  streamFrom(`timeline:${req.accountId}`, req, streamToHttp(req, res), streamHttpEnd(req))
})

app.get('/api/v1/streaming/public', (req, res) => {
  streamFrom('timeline:public', req, streamToHttp(req, res), streamHttpEnd(req), true)
})

app.get('/api/v1/streaming/public/local', (req, res) => {
  streamFrom('timeline:public:local', req, streamToHttp(req, res), streamHttpEnd(req), true)
})

app.get('/api/v1/streaming/hashtag', (req, res) => {
  streamFrom(`timeline:hashtag:${req.query.tag}`, req, streamToHttp(req, res), streamHttpEnd(req), true)
})

app.get('/api/v1/streaming/hashtag/local', (req, res) => {
  streamFrom(`timeline:hashtag:${req.query.tag}:local`, req, streamToHttp(req, res), streamHttpEnd(req), true)
})
// WebSocket entry point: authenticate via the access_token query parameter,
// then attach the stream named by the `stream` query parameter.
wss.on('connection', ws => {
  const location = url.parse(ws.upgradeReq.url, true)
  const token = location.query.access_token
  // Minimal request-like object for logging and account resolution.
  const req = { requestId: uuid.v4() }

  accountFromToken(token, req, err => {
    if (err) {
      log.error(req.requestId, err)
      ws.close()
      return
    }

    switch(location.query.stream) {
    case 'user':
      streamFrom(`timeline:${req.accountId}`, req, streamToWs(req, ws), streamWsEnd(ws))
      break;
    case 'public':
      streamFrom('timeline:public', req, streamToWs(req, ws), streamWsEnd(ws), true)
      break;
    case 'public:local':
      streamFrom('timeline:public:local', req, streamToWs(req, ws), streamWsEnd(ws), true)
      break;
    case 'hashtag':
      streamFrom(`timeline:hashtag:${location.query.tag}`, req, streamToWs(req, ws), streamWsEnd(ws), true)
      break;
    case 'hashtag:local':
      streamFrom(`timeline:hashtag:${location.query.tag}:local`, req, streamToWs(req, ws), streamWsEnd(ws), true)
      break;
    default:
      // Unknown stream type: refuse the connection.
      ws.close()
    }
  })
})
server.listen(process.env.PORT || 4000, () => {
  log.level = process.env.LOG_LEVEL || 'verbose'
  log.info(`Starting streaming API server worker on ${server.address().address}:${server.address().port}`)
})

// Stop accepting new connections when the worker is asked to shut down.
process.on('SIGINT', exit)
process.on('SIGTERM', exit)
process.on('exit', exit)

function exit() {
  server.close()
}
}