I'm building a large application using Nuxt and Vuetify. Everything works fine, but unfortunately the Lighthouse score is not the best: only 42 in performance.
I already improved a few things:
Better font loading from Google;
Moving async code from nuxtServerInit to the layout;
Removing unnecessary third-party services;
It went from 42 to 54, but I'm still not very happy with the result.
Unfortunately I'm not the best at making these improvements because I lack the knowledge.
I can see the TTFB is not optimal at all, but I don't really know what I can improve... So I hope you can help me boost my application with hints and suggestions.
Here I will paste my nuxt.config.js so that you're aware of what I'm using and how:
const path = require('path')
const colors = require('vuetify/es5/util/colors').default
const bodyParser = require('body-parser')
const maxAge = 60 * 60 * 24 * 365 // one year
const prefix = process.env.NODE_ENV === 'production' ? 'example.' : 'exampledev.'
const description =
'description...'
let domain
if (
process.env.NODE_ENV === 'production' &&
process.env.ENV_SLOT === 'staging'
) {
domain = 'example.azurewebsites.net'
} else if (
process.env.NODE_ENV === 'production' &&
process.env.ENV_SLOT !== 'staging'
) {
domain = 'example.com'
} else {
domain = ''
}
module.exports = {
mode: 'universal',
/**
* Disabled telemetry
*/
telemetry: false,
/*
** Server options
*/
server: {
port: process.env.PORT || 3030
},
serverMiddleware: [
bodyParser.json({ limit: '25mb' }),
'~/proxy',
'~/servermiddlewares/www.js'
],
router: {
middleware: 'maintenance'
},
env: {
baseUrl:
process.env.NODE_ENV === 'production'
? 'https://example.com'
: 'https://localhost:3030',
apiBaseUrl:
process.env.API_BASE_URL || 'https://example.azurewebsites.net'
},
/*
** Headers of the page
*/
head: {
title: 'example',
meta: [
{ charset: 'utf-8' },
{ name: 'viewport', content: 'width=device-width, initial-scale=1' },
{
hid: 'description',
name: 'description',
content: description
},
{
hid: 'fb:app_id',
property: 'fb:app_id',
content: process.env.FACEBOOK_APP_ID || 'example'
},
{
hid: 'fb:type',
property: 'fb:type',
content: 'website'
},
{
hid: 'og:site_name',
property: 'og:site_name',
content: 'example'
},
{
hid: 'og:url',
property: 'og:url',
content: 'https://example.com'
},
{
hid: 'og:title',
property: 'og:title',
content: 'example'
},
{
hid: 'og:description',
property: 'og:description',
content: description
},
{
hid: 'og:image',
property: 'og:image',
content: 'https://example.com/images/ogimage.jpg'
},
{
hid: 'robots',
name: 'robots',
content: 'index, follow'
},
{
name: 'msapplication-TileColor',
content: '#ffffff'
},
{
name: 'theme-color',
content: '#ffffff'
}
],
link: [
{
rel: 'apple-touch-icon',
sizes: '180x180',
href: '/apple-touch-icon.png?v=GvbAg4xwqL'
},
{
rel: 'icon',
type: 'image/png',
sizes: '32x32',
href: '/favicon-32x32.png?v=GvbAg4xwqL'
},
{
rel: 'icon',
type: 'image/png',
sizes: '16x16',
href: '/favicon-16x16.png?v=GvbAg4xwqL'
},
{ rel: 'manifest', href: '/site.webmanifest?v=GvbAg4xwqL' },
{
rel: 'mask-icon',
href: '/safari-pinned-tab.svg?v=GvbAg4xwqL',
color: '#777777'
},
{ rel: 'shortcut icon', href: '/favicon.ico?v=GvbAg4xwqL' },
{
rel: 'stylesheet',
href:
'https://fonts.googleapis.com/css?family=Abril+Fatface|Raleway:300,400,700&display=swap'
}
]
},
/*
** Customize the page loading
*/
loading: '~/components/loading.vue',
/*
** Global CSS
*/
css: ['~/assets/style/app.scss', 'swiper/dist/css/swiper.css'],
/*
** Plugins to load before mounting the App
*/
plugins: [
'@/plugins/axios',
'@/plugins/vue-swal',
'@/plugins/example',
{ src: '@/plugins/vue-infinite-scroll', ssr: false },
{ src: '@/plugins/croppa', ssr: false },
{ src: '@/plugins/vue-debounce', ssr: false },
{ src: '@/plugins/vue-awesome-swiper', ssr: false },
{ src: '@/plugins/vue-html2canvas', ssr: false },
{ src: '@/plugins/vue-goodshare', ssr: false }
],
/*
** Nuxt.js modules
*/
modules: [
'@/modules/static',
'@/modules/crawler',
'@nuxtjs/axios',
'@nuxtjs/auth',
'@nuxtjs/device',
'@nuxtjs/prismic',
'@dansmaculotte/nuxt-security',
'@nuxtjs/sitemap',
[
'@nuxtjs/google-analytics',
{
id: 'example',
debug: {
sendHitTask: process.env.NODE_ENV === 'production'
}
}
],
['cookie-universal-nuxt', { parseJSON: false }],
'nuxt-clipboard2'
],
/*
** Security configuration
*/
security: {
dev: process.env.NODE_ENV !== 'production',
hsts: {
maxAge: 15552000,
includeSubDomains: true,
preload: true
},
csp: {
directives: {
// removed contents
}
},
referrer: 'same-origin',
additionalHeaders: true
},
/*
** Prismic configuration
*/
prismic: {
endpoint: 'https://example.cdn.prismic.io/api/v2',
preview: false,
linkResolver: '@/plugins/link-resolver',
htmlSerializer: '@/plugins/html-serializer'
},
/*
** Auth module configuration
*/
auth: {
resetOnError: true,
localStorage: false,
cookie: {
prefix,
options: {
maxAge,
secure: true,
domain
}
},
redirect: {
callback: '/callback',
home: false
},
strategies: {
local: {
endpoints: {
login: {
url: '/auth/local',
method: 'POST',
propertyName: 'token'
},
logout: { url: '/auth/logout', method: 'POST' },
user: { url: '/me', method: 'GET', propertyName: false }
},
tokenRequired: true,
tokenType: 'Bearer'
},
google: {
client_id:
process.env.GOOGLE_CLIENT_ID ||
'example'
},
facebook: {
client_id: process.env.FACEBOOK_APP_ID || 'example',
userinfo_endpoint:
'https://graph.facebook.com/v2.12/me?fields=about,name,picture{url},email',
scope: ['public_profile', 'email']
}
}
},
/*
** Vuetify Module initialization
*/
buildModules: [
['@nuxtjs/pwa', { meta: false, oneSignal: false }],
'@nuxtjs/vuetify'
],
/*
** Vuetify configuration
*/
vuetify: {
customVariables: ['~/assets/style/variables.scss'],
treeShake: true,
rtl: false,
defaultAssets: {
font: false,
icons: 'fa'
}
},
/*
** Vue Loader configuration
*/
chainWebpack: config => {
config.plugin('VuetifyLoaderPlugin').tap(() => [
{
progressiveImages: true
}
])
},
/*
** Build configuration
*/
build: {
analyze: true,
optimizeCSS: true,
/*
** You can extend webpack config here
*/
extend(config, ctx) {
config.resolve.alias.vue = 'vue/dist/vue.common'
// Run ESLint on save
if (ctx.isDev && ctx.isClient) {
config.devtool = 'cheap-module-source-map'
config.module.rules.push({
enforce: 'pre',
test: /\.(js|vue)$/,
loader: 'eslint-loader',
exclude: /(node_modules)/,
options: {
fix: true
}
})
}
if (ctx.isServer) {
config.resolve.alias['~'] = path.resolve(__dirname)
config.resolve.alias['@'] = path.resolve(__dirname)
}
}
}
}
A few pieces of possibly useful information:
I use only scoped styles for each page and component, and the amount of custom style is really small since I'm using almost everything from Vuetify as it is;
When I do "view page source" in my browser, I don't like seeing a very long, non-minified CSS block inside the page...
I don't load anything using fetch or asyncData; I prefer to load data once the component is mounted;
Everything is deployed on Azure and I consume a .NET Core API.
What would be nice to know are the best practices, with some examples, to improve performance, in particular the TTFB.
In Lighthouse I see "Remove unused JavaScript" with a list of /_nuxt/.. files... But I don't think these files are unused, so I would like to know why they are flagged like that...
Maybe Azure should clean the project on each deploy? I don't know...
I use the az Azure CLI and I deploy just by doing git push azure master, so nothing special.
"Reduce initial server response time"... How? The plan the production app runs on is the fastest in Azure, so what should I improve and how?
"Minimize main-thread work": What does it mean?
"Reduce JavaScript execution time": How?
I hope you can help me understand and boost everything.
I will keep this post updated with your requests, maybe you wish to see something more about the project. Thanks
I've recently had to go through this process with a rather large Nuxt application, so I can share some of the insights and solutions we came up with. We managed to bump ours up by about 40 points before we were happy.
My number one piece of advice for anyone reading: ditch the frameworks. By design, they are bloated to handle as many common use cases as possible and to make application development as easy as possible, at the expense of size. In the realm of browsers, where size and speed are everything, each new framework (Nuxt, Vue, Vuetify) adds another layer of abstraction that negatively impacts size and speed.
Anyways, with that out of the way, here are some other pieces of advice for those who cannot ditch the frameworks.
Lighthouse can often be misleading
We found that the "Remove unused JavaScript" warnings were basically impossible to fix with Vue. The problem is that Lighthouse can only inspect the code that is actually run during the test, and it has no idea that code for error handling or onclick handling in the Vue runtime is necessary, until of course it is.
Unfortunately, it's not possible to know ahead of time what code in the runtime is going to be necessary, so it all needs to be sent. However, as the developer, you at least have control over what 3rd party libraries, modules, and plugins are needed during the initial load of the application. It's up to you to ensure only the necessary pieces are sent and used.
So in Lighthouse's eyes, there's lots of useless, unused code. However, the second the application needs to do anything, it's no longer useless, which is why the metric is somewhat misleading.
Always keep this in mind, because there are a lot of "problems" these tools will report that are just a fact of how Javascript applications work. To me, it seems that the developers of these frameworks still have a few more hurdles to overcome in making Javascript apps truly accessible and performant in the eyes of Google.
Keep your plugins and modules list short.
Each plugin you add to your application in the nuxt.config.js increases the size of the main JS bundle included in each page. This inevitably leads to lots of unused code, huge JS file sizes, and of course, longer load times.
It's perfectly valid to instead add plugins to only the pages they're needed on:
// inside the SocialSharing.vue component
import Vue from 'vue'
import VueGoodshare from 'vue-goodshare'
Vue.use(VueGoodshare)
export default { ... }
A reminder though: the page where this import happens will still have all the code from vue-goodshare added. It's much better to only include the specific components from these libraries that you actually need, as sketched below.
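For illustration, here is a rough sketch of pulling in just one provider component and registering it locally (the import path is an assumption; check the vue-goodshare docs for the exact location of its per-provider components):
// inside the SocialSharing.vue component
// Import only the provider that is actually rendered, instead of Vue.use(VueGoodshare)
import VueGoodshareFacebook from 'vue-goodshare/src/providers/Facebook.vue'
export default {
components: { VueGoodshareFacebook }
}
This way the bundle only carries the share button you actually use, rather than every provider the library ships.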
A good way to check this is running your build with the analyze property set to true. (It may be helpful for you to share your analysis here)
Reduce Initial Server Response Time
If you're already running the best server, there's still a few things you can do to help speed things up.
Leverage caching for your pages so that there's no need to render them server-side on every request (see the sketch at the end of this section). However, some of these tests (like Lighthouse) specifically disable caching, leading to poor results.
Reduce the amount of work required to render pages. Ensure there's no blocking API calls happening, keep pages simple and small, and ensure that the server is not overloaded.
Utilize edge caching, or edge deployments, so that your application is closer to your users. For example, if your application is deployed in USWEST, and Lighthouse is being tested in Dubai, you're likely going to see a lot of latency in that request, which will drive up the server response time.
You may need to follow this up with details about the specific server you're running and where it's located to get more help. However, the points outlined above would almost certainly get your TTFB to a green score.
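To make the page-caching point concrete, here is a minimal sketch of in-memory SSR caching for a Nuxt 2 app. It assumes the cached routes render the same HTML for every user; the cache size and TTL are made-up values, and lru-cache's option names vary between major versions:
// nuxt.config.js (sketch)
const LRU = require('lru-cache')
const pageCache = new LRU({ max: 100, maxAge: 60 * 1000 }) // keep up to 100 pages for 1 minute
module.exports = {
serverMiddleware: [
// serve a cached copy before Nuxt spends time rendering
function (req, res, next) {
const cached = pageCache.get(req.url)
if (cached) {
res.setHeader('Content-Type', 'text/html')
return res.end(cached)
}
next()
}
],
hooks: {
render: {
// store the HTML every time a route is server-rendered
route(url, result) {
pageCache.set(url, result.html)
}
}
}
}
Don't apply this to pages that contain user-specific content (auth state, carts, etc.), or one user's markup will be served to another.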
Minimize Main Thread Work
In browsers, the main thread is where all the action happens. It is solely responsible for handling user interactions, updating the page, and in essence, turning a document of HTML into a living application. A main thread that is too busy can lead to performance problems, especially noticeable by users when they're trying to interact with your page.
Often, when seeing this, it's because you're running too much Javascript. Specifically, you're running too much Javascript all at once, which ends up blocking the main thread. Javascript-heavy applications are notorious for this, and it can be a really challenging problem to solve.
The single biggest helper for our app was delaying the loading of unimportant scripts. For example, we run Rollbar, and Google Analytics on all our pages. Instead of loading the scripts at app-start, we instead just load their small command queues, and delay the load time of the big scripts by ~5s. This frees up the main thread to focus on more important things, like rehydrating the Vue application.
You'll also find significant savings by just reducing the amount of JS there is to process. Each line of code returned to the client is another line that has to be sent, parsed, and executed. I would definitely take a look at your modules and plugins first to see if there's some low hanging fruit.
Reduce Javascript Execution Time
This is another unfortunate metric, which in our tests often just meant "the app is still doing something". I say it's unfortunate because in our experience it did not impact the performance or user experience of the application.
We frequently saw our third-party services, like Intercom, Rollbar, GA, etc., extending their execution times well past 10s, and with third-party code, there's nothing you can do besides not use it.
My advice: focus on optimizing the application using everything else I've highlighted. This metric can be incredibly difficult to fix directly, and is usually just a symptom of other things, such as the main thread being too busy or third-party code being slow.
One Last Piece Of Advice
If all else fails, you may be able to "trick" some of the tests in your favour. We did this by delaying the load of our GA and Rollbar scripts until after the test has completed. Remember, this tool is looking at certain metrics in a certain timeframe and scoring you based on that. You may also be able to leverage simple techniques, like lazy loading below-the-fold content (a small sketch follows), to see a noticeable difference in performance.
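As an example of that lazy-loading idea, a heavy below-the-fold section can be split into its own chunk with a dynamic import so it isn't part of the initial bundle (the component name here is hypothetical):
// inside a page component
export default {
components: {
// webpack splits this into a separate chunk; it's only fetched when the
// component actually needs to render, e.g. behind a v-if toggled on scroll
HeavyBelowTheFold: () => import('~/components/HeavyBelowTheFold.vue')
}
}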
Anyways, this is quite a complicated task, and by no means is there a "3 step guide to success" here. You'll find plenty of guides online claiming they've brought their Vue app from 30 to 100 with a few simple changes, but they all ignore the fact that real apps have a lot of code and do a lot of things, and balancing that with speed and performance is an art form.
You may want to take a look at resources such as the shell application model, or service workers.
If you need any clarification on this post, feel free to ask away. But keep in mind, the question you're asking is broad and doesn't have a single "right" way of approaching it. It's ultimately up to you to take the important bits here and apply them as you can.
Update with examples
Most of what I've talked about has been quite hard to show examples for, as I've covered topics that are either overly simplistic and don't need an explanation, or are vague concepts to begin with. However, one method we used that had some good results can be shown.
Here's an example of a modified script we use to load Intercom:
var APP_ID = "your_app_id_here";
window.intercomSettings = {
app_id: APP_ID,
hide_default_launcher: !0,
session_duration: 36e5
},
function() {
var n,
e,
t = window,
o = t.Intercom;
"function" == typeof o ? (o("reattach_activator"), o("update", t.intercomSettings)) : (n = document, (e = function() {
e.c(arguments)
}).q = [], e.c = function(t) {
e.q.push(t)
}, t.Intercom = e, o = function() {
// Don't load the full Intercom script until after 10s
setTimeout(function() {
var t = n.createElement("script");
t.type = "text/javascript",
t.crossOrigin = "anonymous",
t.async = !0,
t.src = "https://widget.intercom.io/widget/" + APP_ID;
var e = n.getElementsByTagName("script")[0];
e.parentNode.insertBefore(t, e)
}, 1e4)
}, "complete" === document.readyState ? o() : t.attachEvent ? t.attachEvent("onload", o) : t.addEventListener("load", o, !1))
This is a custom version of the script they give you to place in your app's <head></head> tag. However, you'll notice we've added a setTimeout that delays the loading of the full Intercom script. This gives your application a chance to load everything else without competing for network or CPU time.
However, as Intercom is no longer guaranteed to be available, you'll need to use greater caution when interacting with it.
This exact same concept can be applied to just about every 3rd party script you might load in. We also use it with Google Analytics, where we initialize the command queue, but defer loading the actual script. Obviously, this can cause tracking issues with short sessions, but that is the tradeoff you need to make if performance is your primary goal.
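For reference, the GA version of this looks roughly like the following. It's a sketch of the stub-queue-plus-delayed-script approach rather than our exact code, and the tracking ID is a placeholder:
// Create the ga() command queue immediately so calls made before the
// library loads are buffered instead of lost
window.ga = window.ga || function () { (ga.q = ga.q || []).push(arguments) }
ga.l = +new Date()
ga('create', 'UA-XXXXXXX-1', 'auto') // placeholder tracking ID
ga('send', 'pageview')
// ...but only fetch the real analytics.js after a delay, so it doesn't
// compete with hydration for network and CPU time
setTimeout(function () {
var s = document.createElement('script')
s.async = true
s.src = 'https://www.google-analytics.com/analytics.js'
document.head.appendChild(s)
}, 5000)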
I'm trying to do a performance test on a SPA with a React frontend, deployed with Netlify.
As a backend we're using Hasura Cloud GraphQL (standard version, https://hasura.io/), where everything from the client goes directly through Hasura to the DB.
The DB is Postgres, hosted on Heroku (Standard 0 tier).
We're hoping to be able to handle around 800 simultaneous users.
The problem is that I'm lost about how to do it, or whether I'm doing it correctly, seeing how most of our operations are subscriptions/mutations that I had to transform into queries. I tried doing those tests with k6 and JMeter, but I'm not sure if I'm doing them properly.
k6 test
At first, I did a quick search and collected around 10 subscriptions that are commonly used. Then I tried to create a performance test with k6 (https://k6.io/docs/using-k6/http-requests/), but I wasn't able to create a working subscription test, so I just transformed each subscription into a query and performed an http.post with this setup:
import http from 'k6/http'
import { sleep } from 'k6'
export const options = {
stages: [
{ duration: '30s', target: 75 },
{ duration: '120s', target: 75 },
{ duration: '60s', target: 50 },
{ duration: '30s', target: 30 },
{ duration: '10s', target: 0 }
]
};
export default function () {
// prod (the GraphQL endpoint), params (headers) and listaQueries are defined elsewhere in our script
var res = http.post(prod,
JSON.stringify({
query: listaQueries.GetDesafiosCursosByKey(
keys.desafioCursoKey
)}), params);
sleep(1)
}
I did this for every query and ran each test individually. Unfortunately, the numbers I got were bad, and somehow our test environment was getting better times than production. (The only difference, AFAIK, is that we're using Hasura Cloud for production.)
I tried to implement WebSockets, but I couldn't get them to work and configure them for a stress/load test.
K6 result
JMeter test
After that, I tried something similar with JMeter, but again I couldn't figure out how to set up a subscription test (after a while, I read in a blog that JMeter doesn't support it:
https://qainsights.com/deep-dive-into-graphql-in-jmeter/ ), so I simply transformed all subscriptions into queries and tried to do the same, but the numbers I was getting were different and much higher than in k6.
Jmeter query Config 1
Jmeter query config 2
Jmeter thread config
Questions
I'm not sure if I'm doing it correctly, i.e. whether transforming every subscription into a query and performing an HTTP request is a correct approach. (At least I know that those queries return the data correctly.)
Should I just increase the number of VUs/threads until I get a constant timeout, to simulate a stress test? Some tests were causing a GraphQL error on the website, and others were producing
"WARN[0059] Request Failed error="Post \"https://xxxxxxx-xxxxx.herokuapp.com/v1/graphql\": EOF""
in the k6 console.
Or should I just give up on k6/JMeter and look for another tool to perform these tests?
Thank you in advance, and sorry for my English and explanation, but I'm a complete newbie at this.
I'm not sure if I'm doing it correctly, i.e. whether transforming every subscription into a query and performing an HTTP request is a correct approach. (At least I know that those queries return the data correctly.)
Ideally you would be using WebSockets, as that is what actual clients will most likely be using.
For code samples, check out the answer here.
Here's a more complete example utilizing a main.js entry script with modularized subscription code in subscriptions/bikes-brands.js. It also uses the Httpx library to set a global request header:
// main.js
import { Httpx } from 'https://jslib.k6.io/httpx/0.0.5/index.js';
import { getBikeBrandsByIdSub } from './subscriptions/bikes-brands.js';
const session = new Httpx({
baseURL: `http://54.227.75.222:8080`
});
const wsUri = 'wss://54.227.75.222:8080/v1/graphql';
const pauseMin = 2;
const pauseMax = 6;
export const options = {};
export default function () {
session.addHeader('Content-Type', 'application/json');
getBikeBrandsByIdSub(1);
}
// subscriptions/bikes-brands.js
import ws from 'k6/ws';
// GraphQL WebSocket endpoint (defined here so this module is self-contained)
const wsUri = 'wss://54.227.75.222:8080/v1/graphql';
/* using string concatenation */
export function getBikeBrandsByIdSub(id) {
const query = `
subscription getBikeBrandsByIdSub {
bikes_brands(where: {id: {_eq: ${id}}}) {
id
brand
notes
updated_at
created_at
}
}
`;
const subscribePayload = {
id: "1",
payload: {
extensions: {},
operationName: "query",
query: query,
variables: {},
},
type: "start",
}
const initPayload = {
payload: {
headers: {
"content-type": "application/json",
},
lazy: true,
},
type: "connection_init",
};
console.debug(JSON.stringify(subscribePayload));
// start a WS connection
const res = ws.connect(wsUri, initPayload, function(socket) {
socket.on('open', function() {
console.debug('WS connection established!');
// send the connection_init:
socket.send(JSON.stringify(initPayload));
// send the chat subscription:
socket.send(JSON.stringify(subscribePayload));
});
socket.on('message', function(message) {
let messageObj;
try {
messageObj = JSON.parse(message);
}
catch (err) {
console.warn('Unable to parse WS message as JSON: ' + message);
}
if (messageObj && messageObj.type === 'data') {
console.log(`${messageObj.type} message received by VU ${__VU}: ${Object.keys(messageObj.payload.data)[0]}`);
}
console.log(`WS message received by VU ${__VU}:\n` + message);
});
});
}
Should I just increase the number of VUs/threads until I get a constant timeout, to simulate a stress test?
Timeouts and errors that only happen under load are signals that you may be hitting a bottleneck somewhere. Do you only see the EOFs under load? These are basically the server sending back incomplete responses or closing connections early, which shouldn't happen under normal circumstances.
My expectation is that your test should replicate real user activity as closely as possible. I doubt that real users will be sending requests to GraphQL directly, and a well-behaved load test must replicate real-life application usage as closely as possible.
So I believe you should work at the HTTP protocol level and mimic the network footprint of a real browser, instead of trying to come up with individual GraphQL queries.
With regards to the JMeter and k6 differences: it might be the case that k6 produces higher throughput on the same hardware when firing requests at maximum speed, as evidenced by the benchmark in the Open Source Load Testing Tools 2021 article. However, given that you're trying to simulate real users using real browsers accessing your application, and real users don't hammer the application non-stop but need some time to "think" between operations, you should be getting the same number of requests from both load testing tools. If JMeter doesn't give you the load you want, make sure to follow JMeter Best Practices and/or consider running it in distributed mode.
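If you want to model that think time in the k6 script itself, a randomized pause between iterations is the simplest way to do it (the 2-6 second bounds below are arbitrary, mirroring the pauseMin/pauseMax constants declared but unused in the earlier example):
import { sleep } from 'k6'
export default function () {
// ...issue the requests that make up one user journey here...
// then pause 2-6 seconds, like a real user reading the page before the next action
sleep(Math.random() * 4 + 2)
}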
We've run into some problems with our multi-site Spartacus setup when doing i18n.
We'd like to have different translations for each site, so we put these on an API that can give back the messages depending on the base site, e.g.: backend.org/baseSiteX/messages?group=common
But the Spartacus setup doesn't let us pass the baseSite? We can pass {{lng}} and {{ns}}, but no baseSite.
See https://sap.github.io/spartacus-docs/i18n/#lazy-loading
We could do it by overriding i18nextInit, but I'm unsure how to achieve this.
In the documentation, it says you can use crossOrigin: true in the config, but that does not seem to work. The type checking says it's unsupported, and it still shows CORS issues.
Does someone have ideas for these problems?
Currently only the language {{lng}} and chunk name {{ns}} are supported as dynamic params in the i18n.backend.loadPath config.
To achieve your goal, you can implement a custom Spartacus CONFIG_INITIALIZER that will populate your i18n.backend.loadPath config based on the value from BaseSiteService.getActive():
// imports assumed to come from @angular/core, rxjs and @spartacus/core
import { Injectable } from '@angular/core';
import { Observable } from 'rxjs';
import { map, take } from 'rxjs/operators';
import { BaseSiteService, ConfigInitializer, I18nConfig } from '@spartacus/core';
@Injectable({ providedIn: 'root' })
export class I18nBackendPathConfigInitializer implements ConfigInitializer {
readonly scopes = ['i18n.backend.loadPath']; // declare config key that you will resolve
readonly configFactory = () => this.resolveConfig().toPromise();
constructor(protected baseSiteService: BaseSiteService) {}
protected resolveConfig(): Observable<I18nConfig> {
return this.baseSiteService.getActive().pipe(
take(1),
map((baseSite) => ({
i18n: {
backend: {
// initialize your i18n backend path using the basesite value:
loadPath: `https://backend.org/${baseSite}/messages?lang={{lng}}&group={{ns}}`,
},
},
}))
);
}
}
and provide it in your module (i.e. in app.module):
@NgModule({
providers: [
{
provide: CONFIG_INITIALIZER,
useExisting: I18nBackendPathConfigInitializer,
multi: true,
},
],
/* ... */
})
export class AppModule {}
Note: the above solution assumes the active basesite is set only once, on app start (which is the case in Spartacus by default).
I am using Vue CLI 3 and created a PWA using the PWA plugin. It all works pretty well and I am getting a Lighthouse Progressive Web App score of 100 and a Performance score of 68 on a 3G connection.
The problem affecting my Performance score is that I fail to "Serve static assets with an efficient cache policy".
I also tested the app on webPageTest.org and it indicates a problem with "Leverage browser caching of static assets".
My website is https://www.istimuli.com/
I am assuming that I have to use runtime caching to cache these files. I use the GenerateSW plugin and tried the runtimeCaching option, but it does not work.
I'd really appreciate any help with caching these files and getting a higher Performance score.
Thanks.
Here's my vue.config.js file:
const { GenerateSW } = require('workbox-webpack-plugin');
const VueLoaderPlugin = require('vue-loader/lib/plugin');
const MiniCssExtractPlugin = require("mini-css-extract-plugin");
const PurgecssPlugin = require('purgecss-webpack-plugin');
// var HtmlWebpackPlugin = require('html-webpack-plugin');
const glob = require('glob-all');
const path = require('path');
module.exports = {
pwa: {
module: {
rules: [
{
test: /\.vue$/,
loader: 'vue-loader',
},
{
test: /\.css$/,
// use: [
// {
// loader: MiniCssExtractPlugin.loader,
// },
// "css-loader"
// ]
use: [
'style-loader',
{
loader: 'css-loader', options: {
minimize: true
}
}
]
}
]
},
plugins: [
new VueLoaderPlugin(),
new MiniCssExtractPlugin(),
new PurgecssPlugin({
paths: glob.sync([
path.join(__dirname, './public/index.html'),
path.join(__dirname, './src/assets/myJavascript/*.js'),
path.join(__dirname, './src/assets/css/*.css'),
path.join(__dirname, './src/components/*.vue'),
path.join(__dirname, './src/plugins/*.js'),
path.join(__dirname, './src/*.js'),
path.join(__dirname, './src/*.vue'),
])
}),
new GenerateSW({
runtimeCaching: [
{
urlPattern: new RegExp('^https://cors\.sdk.amazonaws.com/'),
handler: 'staleWhileRevalidate',
options: {
cacheableResponse: {
statuses: [0, 200]
}
}
},
{
urlPattern: /manifest/,
handler: 'staleWhileRevalidate',
options: {
expiration: {
maxEntries: 5,
maxAgeSeconds: 60 * 60 * 24 * 7,
}
}
}
]
})
],
}
}
Edited:
I edited my server's .htaccess file to apply cache control, and things have improved somewhat.
I now only have the AWS SDK file to contend with. I'm assuming that CORS has something to do with this, so any help regarding how to cache it would be appreciated.
Further, checking the Lighthouse report, I see that the AWS SDK is flagged as not having an efficient cache policy. I assume this is also linked to the CORS issue mentioned above?
There are 11 resources listed (I have only included 3 in the image), all shown with a cache lifespan of 30 days. Is this sufficient, or should it be longer to get a better Lighthouse score?
I notice that the fonts file (with the woff2 extension) is now cached by the browser even though I did not include this extension when I modified the .htaccess file (see above). I find this confusing; any idea why it's caching now when it did not before I updated the .htaccess file?
So I guess, for now, my main concern is caching the AWS SDK, and any help in this regard would be appreciated.
Thanks.
I have tried following numerous tutorials on connecting Drupal and GatsbyJS. For instance, in this article, I get to step 4.3b, "Pull Content from the Drupal 8 site using GraphQL", before everything fails.
I have followed every instruction, but GraphQL seems to be pulling from the temporary Gatsby page rather than the Drupal site I made. Queries such as "allNodeBlog" don't exist, and I'm not sure how to fix things so I can pull information from my Drupal JSON API rather than from the Gatsby site.
Any help/advice would be great, and/or articles that aren't simply the first Googled "Drupal and Gatsby" help sites (as I've looked at them all).
module.exports = {
siteMetadata: {
title: 'Gatsby Default Starter',
},
plugins: [
'gatsby-plugin-react-helmet',
{
resolve: 'gatsby-source-drupal',
options: {
baseUrl: 'http://gatsbydrupal.dd:8083/',
apiBase: 'api', // endpoint of Drupal server
},
},
{
resolve: `gatsby-plugin-manifest`,
options: {
name: 'gatsby-starter-default',
short_name: 'starter',
start_url: '/',
background_color: '#663399',
theme_color: '#663399',
display: 'minimal-ui',
icon: 'src/images/gatsby-icon.png', // This path is relative to the root of the site.
},
},
'gatsby-plugin-offline',
],
}
I'm using DocPad to generate system documentation. I am including release notes in the format
http://example.com/releases/1.0
http://example.com/releases/1.1
http://example.com/releases/1.2
http://example.com/releases/1.3
I want to include a link which will redirect to the most recent release.
http://example.com/releases/latest
My question: how do I make a link that will redirect to a relative URL based on configuration? I want this to be easily changeable by a non-programmer.
Update: I've added cleanurls into my docpad.js, similar to the example below (see code below). But "grunt docpad:generate" seems to skip making the redirect (is this an HTML page?). I have a static site. I also confirmed I'm using the latest cleanurls (2.8.1) in my package.json.
Here's my docpad.js:
'use strict';
var releases = require('./releases.json'); // list them as a list, backwards: ["1.3", "1.2", "1.1", "1.0"]
var latestRelease = releases.slice(1,2)[0];
module.exports = {
outPath: 'epicenter/docs/',
templateData: {
site: {
swiftype: {
apiKey: 'XXXX',
resultsUrl: '/epicenter/docs/search.html'
},
ga: 'XXXX'
},
},
collections: {
public: function () {
return this.getCollection('documents').findAll({
relativeOutDirPath: /public.*/, isPage: true
});
}
},
plugins: {
cleanurls: {
simpleRedirects: {'/public/releases/latest': '/public/releases/' + latestRelease}
},
lunr: {
resultsTemplate: 'src/partials/teaser.html.eco',
indexes: {
myIndex: {
collection: 'public',
indexFields: [{
name: 'title',
boost: 10
}, {
name: 'body',
boost: 1
}]
}
}
}
}
};
When I run grunt docpad:generate, my pages get generated, but there is an error near the end:
/data/jenkins/workspace/stage-epicenter-docs/docs/docpad/node_modules/docpad-plugin-cleanurls/node_modules/taskgroup/node_modules/ambi/es6/lib/ambi.js:5
export default function ambi (method, ...args) {
^^^^^^
I can't tell if that's the issue preventing this from running but it seems suspicious.
Provided that your configuration is available to the DocPad configuration file, you can use the redirect abilities of the cleanurls plugin to accomplish this in both dynamic and static environments.
With a docpad.coffee configuration file, it would look something like this:
releases = require('./releases.json') # ['1.0', '1.1', '1.2', '1.3']
latestRelease = releases.slice(-1)[0]
docpadConfig =
plugins:
cleanurls:
simpleRedirects:
'/releases/latest': '/releases/' + latestRelease
module.exports = docpadConfig