[{"data":1,"prerenderedAt":973},["ShallowReactive",2],{"/en-us/blog/tags/zero-trust/":3,"navigation-en-us":20,"banner-en-us":438,"footer-en-us":450,"zero trust-tag-page-en-us":661},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"content":8,"config":11,"_id":13,"_type":14,"title":15,"_source":16,"_file":17,"_stem":18,"_extension":19},"/en-us/blog/tags/zero-trust","tags",false,"",{"tag":9,"tagSlug":10},"zero trust","zero-trust",{"template":12},"BlogTag","content:en-us:blog:tags:zero-trust.yml","yaml","Zero Trust","content","en-us/blog/tags/zero-trust.yml","en-us/blog/tags/zero-trust","yml",{"_path":21,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"data":23,"_id":434,"_type":14,"title":435,"_source":16,"_file":436,"_stem":437,"_extension":19},"/shared/en-us/main-navigation","en-us",{"logo":24,"freeTrial":29,"sales":34,"login":39,"items":44,"search":375,"minimal":406,"duo":425},{"config":25},{"href":26,"dataGaName":27,"dataGaLocation":28},"/","gitlab logo","header",{"text":30,"config":31},"Get free trial",{"href":32,"dataGaName":33,"dataGaLocation":28},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com&glm_content=default-saas-trial/","free trial",{"text":35,"config":36},"Talk to sales",{"href":37,"dataGaName":38,"dataGaLocation":28},"/sales/","sales",{"text":40,"config":41},"Sign in",{"href":42,"dataGaName":43,"dataGaLocation":28},"https://gitlab.com/users/sign_in/","sign in",[45,89,185,190,296,356],{"text":46,"config":47,"cards":49,"footer":72},"Platform",{"dataNavLevelOne":48},"platform",[50,56,64],{"title":46,"description":51,"link":52},"The most comprehensive AI-powered DevSecOps Platform",{"text":53,"config":54},"Explore our Platform",{"href":55,"dataGaName":48,"dataGaLocation":28},"/platform/",{"title":57,"description":58,"link":59},"GitLab Duo (AI)","Build software faster with AI at every stage of development",{"text":60,"config":61},"Meet GitLab Duo",{"href":62,"dataGaName":63,"dataGaLocation":28},"/gitlab-duo/","gitlab duo 
ai",{"title":65,"description":66,"link":67},"Why GitLab","10 reasons why Enterprises choose GitLab",{"text":68,"config":69},"Learn more",{"href":70,"dataGaName":71,"dataGaLocation":28},"/why-gitlab/","why gitlab",{"title":73,"items":74},"Get started with",[75,80,85],{"text":76,"config":77},"Platform Engineering",{"href":78,"dataGaName":79,"dataGaLocation":28},"/solutions/platform-engineering/","platform engineering",{"text":81,"config":82},"Developer Experience",{"href":83,"dataGaName":84,"dataGaLocation":28},"/developer-experience/","Developer experience",{"text":86,"config":87},"MLOps",{"href":88,"dataGaName":86,"dataGaLocation":28},"/topics/devops/the-role-of-ai-in-devops/",{"text":90,"left":91,"config":92,"link":94,"lists":98,"footer":167},"Product",true,{"dataNavLevelOne":93},"solutions",{"text":95,"config":96},"View all Solutions",{"href":97,"dataGaName":93,"dataGaLocation":28},"/solutions/",[99,124,146],{"title":100,"description":101,"link":102,"items":107},"Automation","CI/CD and automation to accelerate deployment",{"config":103},{"icon":104,"href":105,"dataGaName":106,"dataGaLocation":28},"AutomatedCodeAlt","/solutions/delivery-automation/","automated software delivery",[108,112,116,120],{"text":109,"config":110},"CI/CD",{"href":111,"dataGaLocation":28,"dataGaName":109},"/solutions/continuous-integration/",{"text":113,"config":114},"AI-Assisted Development",{"href":62,"dataGaLocation":28,"dataGaName":115},"AI assisted development",{"text":117,"config":118},"Source Code Management",{"href":119,"dataGaLocation":28,"dataGaName":117},"/solutions/source-code-management/",{"text":121,"config":122},"Automated Software Delivery",{"href":105,"dataGaLocation":28,"dataGaName":123},"Automated software delivery",{"title":125,"description":126,"link":127,"items":132},"Security","Deliver code faster without compromising security",{"config":128},{"href":129,"dataGaName":130,"dataGaLocation":28,"icon":131},"/solutions/security-compliance/","security and 
compliance","ShieldCheckLight",[133,136,141],{"text":134,"config":135},"Security & Compliance",{"href":129,"dataGaLocation":28,"dataGaName":134},{"text":137,"config":138},"Software Supply Chain Security",{"href":139,"dataGaLocation":28,"dataGaName":140},"/solutions/supply-chain/","Software supply chain security",{"text":142,"config":143},"Compliance & Governance",{"href":144,"dataGaLocation":28,"dataGaName":145},"/solutions/continuous-software-compliance/","Compliance and governance",{"title":147,"link":148,"items":153},"Measurement",{"config":149},{"icon":150,"href":151,"dataGaName":152,"dataGaLocation":28},"DigitalTransformation","/solutions/visibility-measurement/","visibility and measurement",[154,158,162],{"text":155,"config":156},"Visibility & Measurement",{"href":151,"dataGaLocation":28,"dataGaName":157},"Visibility and Measurement",{"text":159,"config":160},"Value Stream Management",{"href":161,"dataGaLocation":28,"dataGaName":159},"/solutions/value-stream-management/",{"text":163,"config":164},"Analytics & Insights",{"href":165,"dataGaLocation":28,"dataGaName":166},"/solutions/analytics-and-insights/","Analytics and insights",{"title":168,"items":169},"GitLab for",[170,175,180],{"text":171,"config":172},"Enterprise",{"href":173,"dataGaLocation":28,"dataGaName":174},"/enterprise/","enterprise",{"text":176,"config":177},"Small Business",{"href":178,"dataGaLocation":28,"dataGaName":179},"/small-business/","small business",{"text":181,"config":182},"Public Sector",{"href":183,"dataGaLocation":28,"dataGaName":184},"/solutions/public-sector/","public sector",{"text":186,"config":187},"Pricing",{"href":188,"dataGaName":189,"dataGaLocation":28,"dataNavLevelOne":189},"/pricing/","pricing",{"text":191,"config":192,"link":194,"lists":198,"feature":283},"Resources",{"dataNavLevelOne":193},"resources",{"text":195,"config":196},"View all resources",{"href":197,"dataGaName":193,"dataGaLocation":28},"/resources/",[199,232,255],{"title":200,"items":201},"Getting 
started",[202,207,212,217,222,227],{"text":203,"config":204},"Install",{"href":205,"dataGaName":206,"dataGaLocation":28},"/install/","install",{"text":208,"config":209},"Quick start guides",{"href":210,"dataGaName":211,"dataGaLocation":28},"/get-started/","quick setup checklists",{"text":213,"config":214},"Learn",{"href":215,"dataGaLocation":28,"dataGaName":216},"https://university.gitlab.com/","learn",{"text":218,"config":219},"Product documentation",{"href":220,"dataGaName":221,"dataGaLocation":28},"https://docs.gitlab.com/","product documentation",{"text":223,"config":224},"Best practice videos",{"href":225,"dataGaName":226,"dataGaLocation":28},"/getting-started-videos/","best practice videos",{"text":228,"config":229},"Integrations",{"href":230,"dataGaName":231,"dataGaLocation":28},"/integrations/","integrations",{"title":233,"items":234},"Discover",[235,240,245,250],{"text":236,"config":237},"Customer success stories",{"href":238,"dataGaName":239,"dataGaLocation":28},"/customers/","customer success stories",{"text":241,"config":242},"Blog",{"href":243,"dataGaName":244,"dataGaLocation":28},"/blog/","blog",{"text":246,"config":247},"Remote",{"href":248,"dataGaName":249,"dataGaLocation":28},"https://handbook.gitlab.com/handbook/company/culture/all-remote/","remote",{"text":251,"config":252},"TeamOps",{"href":253,"dataGaName":254,"dataGaLocation":28},"/teamops/","teamops",{"title":256,"items":257},"Connect",[258,263,268,273,278],{"text":259,"config":260},"GitLab 
Services",{"href":261,"dataGaName":262,"dataGaLocation":28},"/services/","services",{"text":264,"config":265},"Community",{"href":266,"dataGaName":267,"dataGaLocation":28},"/community/","community",{"text":269,"config":270},"Forum",{"href":271,"dataGaName":272,"dataGaLocation":28},"https://forum.gitlab.com/","forum",{"text":274,"config":275},"Events",{"href":276,"dataGaName":277,"dataGaLocation":28},"/events/","events",{"text":279,"config":280},"Partners",{"href":281,"dataGaName":282,"dataGaLocation":28},"/partners/","partners",{"backgroundColor":284,"textColor":285,"text":286,"image":287,"link":291},"#2f2a6b","#fff","Insights for the future of software development",{"altText":288,"config":289},"the source promo card",{"src":290},"/images/navigation/the-source-promo-card.svg",{"text":292,"config":293},"Read the latest",{"href":294,"dataGaName":295,"dataGaLocation":28},"/the-source/","the source",{"text":297,"config":298,"lists":300},"Company",{"dataNavLevelOne":299},"company",[301],{"items":302},[303,308,314,316,321,326,331,336,341,346,351],{"text":304,"config":305},"About",{"href":306,"dataGaName":307,"dataGaLocation":28},"/company/","about",{"text":309,"config":310,"footerGa":313},"Jobs",{"href":311,"dataGaName":312,"dataGaLocation":28},"/jobs/","jobs",{"dataGaName":312},{"text":274,"config":315},{"href":276,"dataGaName":277,"dataGaLocation":28},{"text":317,"config":318},"Leadership",{"href":319,"dataGaName":320,"dataGaLocation":28},"/company/team/e-group/","leadership",{"text":322,"config":323},"Team",{"href":324,"dataGaName":325,"dataGaLocation":28},"/company/team/","team",{"text":327,"config":328},"Handbook",{"href":329,"dataGaName":330,"dataGaLocation":28},"https://handbook.gitlab.com/","handbook",{"text":332,"config":333},"Investor relations",{"href":334,"dataGaName":335,"dataGaLocation":28},"https://ir.gitlab.com/","investor relations",{"text":337,"config":338},"Trust Center",{"href":339,"dataGaName":340,"dataGaLocation":28},"/security/","trust 
center",{"text":342,"config":343},"AI Transparency Center",{"href":344,"dataGaName":345,"dataGaLocation":28},"/ai-transparency-center/","ai transparency center",{"text":347,"config":348},"Newsletter",{"href":349,"dataGaName":350,"dataGaLocation":28},"/company/contact/","newsletter",{"text":352,"config":353},"Press",{"href":354,"dataGaName":355,"dataGaLocation":28},"/press/","press",{"text":357,"config":358,"lists":359},"Contact us",{"dataNavLevelOne":299},[360],{"items":361},[362,365,370],{"text":35,"config":363},{"href":37,"dataGaName":364,"dataGaLocation":28},"talk to sales",{"text":366,"config":367},"Get help",{"href":368,"dataGaName":369,"dataGaLocation":28},"/support/","get help",{"text":371,"config":372},"Customer portal",{"href":373,"dataGaName":374,"dataGaLocation":28},"https://customers.gitlab.com/customers/sign_in/","customer portal",{"close":376,"login":377,"suggestions":384},"Close",{"text":378,"link":379},"To search repositories and projects, login to",{"text":380,"config":381},"gitlab.com",{"href":42,"dataGaName":382,"dataGaLocation":383},"search login","search",{"text":385,"default":386},"Suggestions",[387,389,393,395,399,403],{"text":57,"config":388},{"href":62,"dataGaName":57,"dataGaLocation":383},{"text":390,"config":391},"Code Suggestions (AI)",{"href":392,"dataGaName":390,"dataGaLocation":383},"/solutions/code-suggestions/",{"text":109,"config":394},{"href":111,"dataGaName":109,"dataGaLocation":383},{"text":396,"config":397},"GitLab on AWS",{"href":398,"dataGaName":396,"dataGaLocation":383},"/partners/technology-partners/aws/",{"text":400,"config":401},"GitLab on Google Cloud",{"href":402,"dataGaName":400,"dataGaLocation":383},"/partners/technology-partners/google-cloud-platform/",{"text":404,"config":405},"Why GitLab?",{"href":70,"dataGaName":404,"dataGaLocation":383},{"freeTrial":407,"mobileIcon":412,"desktopIcon":417,"secondaryButton":420},{"text":408,"config":409},"Start free 
trial",{"href":410,"dataGaName":33,"dataGaLocation":411},"https://gitlab.com/-/trials/new/","nav",{"altText":413,"config":414},"Gitlab Icon",{"src":415,"dataGaName":416,"dataGaLocation":411},"/images/brand/gitlab-logo-tanuki.svg","gitlab icon",{"altText":413,"config":418},{"src":419,"dataGaName":416,"dataGaLocation":411},"/images/brand/gitlab-logo-type.svg",{"text":421,"config":422},"Get Started",{"href":423,"dataGaName":424,"dataGaLocation":411},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com/compare/gitlab-vs-github/","get started",{"freeTrial":426,"mobileIcon":430,"desktopIcon":432},{"text":427,"config":428},"Learn more about GitLab Duo",{"href":62,"dataGaName":429,"dataGaLocation":411},"gitlab duo",{"altText":413,"config":431},{"src":415,"dataGaName":416,"dataGaLocation":411},{"altText":413,"config":433},{"src":419,"dataGaName":416,"dataGaLocation":411},"content:shared:en-us:main-navigation.yml","Main Navigation","shared/en-us/main-navigation.yml","shared/en-us/main-navigation",{"_path":439,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"title":440,"button":441,"config":445,"_id":447,"_type":14,"_source":16,"_file":448,"_stem":449,"_extension":19},"/shared/en-us/banner","GitLab Duo Agent Platform is now in public beta!",{"text":68,"config":442},{"href":443,"dataGaName":444,"dataGaLocation":28},"/gitlab-duo/agent-platform/","duo banner",{"layout":446},"release","content:shared:en-us:banner.yml","shared/en-us/banner.yml","shared/en-us/banner",{"_path":451,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"data":452,"_id":657,"_type":14,"title":658,"_source":16,"_file":659,"_stem":660,"_extension":19},"/shared/en-us/main-footer",{"text":453,"source":454,"edit":460,"contribute":465,"config":470,"items":475,"minimal":649},"Git is a trademark of Software Freedom Conservancy and our use of 'GitLab' is under license",{"text":455,"config":456},"View page 
source",{"href":457,"dataGaName":458,"dataGaLocation":459},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/","page source","footer",{"text":461,"config":462},"Edit this page",{"href":463,"dataGaName":464,"dataGaLocation":459},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/content/","web ide",{"text":466,"config":467},"Please contribute",{"href":468,"dataGaName":469,"dataGaLocation":459},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/CONTRIBUTING.md/","please contribute",{"twitter":471,"facebook":472,"youtube":473,"linkedin":474},"https://twitter.com/gitlab","https://www.facebook.com/gitlab","https://www.youtube.com/channel/UCnMGQ8QHMAnVIsI3xJrihhg","https://www.linkedin.com/company/gitlab-com",[476,499,556,585,619],{"title":46,"links":477,"subMenu":482},[478],{"text":479,"config":480},"DevSecOps platform",{"href":55,"dataGaName":481,"dataGaLocation":459},"devsecops platform",[483],{"title":186,"links":484},[485,489,494],{"text":486,"config":487},"View plans",{"href":188,"dataGaName":488,"dataGaLocation":459},"view plans",{"text":490,"config":491},"Why Premium?",{"href":492,"dataGaName":493,"dataGaLocation":459},"/pricing/premium/","why premium",{"text":495,"config":496},"Why Ultimate?",{"href":497,"dataGaName":498,"dataGaLocation":459},"/pricing/ultimate/","why ultimate",{"title":500,"links":501},"Solutions",[502,507,510,512,517,522,526,529,533,538,540,543,546,551],{"text":503,"config":504},"Digital transformation",{"href":505,"dataGaName":506,"dataGaLocation":459},"/topics/digital-transformation/","digital transformation",{"text":134,"config":508},{"href":129,"dataGaName":509,"dataGaLocation":459},"security & compliance",{"text":123,"config":511},{"href":105,"dataGaName":106,"dataGaLocation":459},{"text":513,"config":514},"Agile development",{"href":515,"dataGaName":516,"dataGaLocation":459},"/solutions/agile-delivery/","agile 
delivery",{"text":518,"config":519},"Cloud transformation",{"href":520,"dataGaName":521,"dataGaLocation":459},"/topics/cloud-native/","cloud transformation",{"text":523,"config":524},"SCM",{"href":119,"dataGaName":525,"dataGaLocation":459},"source code management",{"text":109,"config":527},{"href":111,"dataGaName":528,"dataGaLocation":459},"continuous integration & delivery",{"text":530,"config":531},"Value stream management",{"href":161,"dataGaName":532,"dataGaLocation":459},"value stream management",{"text":534,"config":535},"GitOps",{"href":536,"dataGaName":537,"dataGaLocation":459},"/solutions/gitops/","gitops",{"text":171,"config":539},{"href":173,"dataGaName":174,"dataGaLocation":459},{"text":541,"config":542},"Small business",{"href":178,"dataGaName":179,"dataGaLocation":459},{"text":544,"config":545},"Public sector",{"href":183,"dataGaName":184,"dataGaLocation":459},{"text":547,"config":548},"Education",{"href":549,"dataGaName":550,"dataGaLocation":459},"/solutions/education/","education",{"text":552,"config":553},"Financial services",{"href":554,"dataGaName":555,"dataGaLocation":459},"/solutions/finance/","financial 
services",{"title":191,"links":557},[558,560,562,564,567,569,571,573,575,577,579,581,583],{"text":203,"config":559},{"href":205,"dataGaName":206,"dataGaLocation":459},{"text":208,"config":561},{"href":210,"dataGaName":211,"dataGaLocation":459},{"text":213,"config":563},{"href":215,"dataGaName":216,"dataGaLocation":459},{"text":218,"config":565},{"href":220,"dataGaName":566,"dataGaLocation":459},"docs",{"text":241,"config":568},{"href":243,"dataGaName":244,"dataGaLocation":459},{"text":236,"config":570},{"href":238,"dataGaName":239,"dataGaLocation":459},{"text":246,"config":572},{"href":248,"dataGaName":249,"dataGaLocation":459},{"text":259,"config":574},{"href":261,"dataGaName":262,"dataGaLocation":459},{"text":251,"config":576},{"href":253,"dataGaName":254,"dataGaLocation":459},{"text":264,"config":578},{"href":266,"dataGaName":267,"dataGaLocation":459},{"text":269,"config":580},{"href":271,"dataGaName":272,"dataGaLocation":459},{"text":274,"config":582},{"href":276,"dataGaName":277,"dataGaLocation":459},{"text":279,"config":584},{"href":281,"dataGaName":282,"dataGaLocation":459},{"title":297,"links":586},[587,589,591,593,595,597,599,603,608,610,612,614],{"text":304,"config":588},{"href":306,"dataGaName":299,"dataGaLocation":459},{"text":309,"config":590},{"href":311,"dataGaName":312,"dataGaLocation":459},{"text":317,"config":592},{"href":319,"dataGaName":320,"dataGaLocation":459},{"text":322,"config":594},{"href":324,"dataGaName":325,"dataGaLocation":459},{"text":327,"config":596},{"href":329,"dataGaName":330,"dataGaLocation":459},{"text":332,"config":598},{"href":334,"dataGaName":335,"dataGaLocation":459},{"text":600,"config":601},"Sustainability",{"href":602,"dataGaName":600,"dataGaLocation":459},"/sustainability/",{"text":604,"config":605},"Diversity, inclusion and belonging (DIB)",{"href":606,"dataGaName":607,"dataGaLocation":459},"/diversity-inclusion-belonging/","Diversity, inclusion and 
belonging",{"text":337,"config":609},{"href":339,"dataGaName":340,"dataGaLocation":459},{"text":347,"config":611},{"href":349,"dataGaName":350,"dataGaLocation":459},{"text":352,"config":613},{"href":354,"dataGaName":355,"dataGaLocation":459},{"text":615,"config":616},"Modern Slavery Transparency Statement",{"href":617,"dataGaName":618,"dataGaLocation":459},"https://handbook.gitlab.com/handbook/legal/modern-slavery-act-transparency-statement/","modern slavery transparency statement",{"title":620,"links":621},"Contact Us",[622,625,627,629,634,639,644],{"text":623,"config":624},"Contact an expert",{"href":37,"dataGaName":38,"dataGaLocation":459},{"text":366,"config":626},{"href":368,"dataGaName":369,"dataGaLocation":459},{"text":371,"config":628},{"href":373,"dataGaName":374,"dataGaLocation":459},{"text":630,"config":631},"Status",{"href":632,"dataGaName":633,"dataGaLocation":459},"https://status.gitlab.com/","status",{"text":635,"config":636},"Terms of use",{"href":637,"dataGaName":638,"dataGaLocation":459},"/terms/","terms of use",{"text":640,"config":641},"Privacy statement",{"href":642,"dataGaName":643,"dataGaLocation":459},"/privacy/","privacy statement",{"text":645,"config":646},"Cookie preferences",{"dataGaName":647,"dataGaLocation":459,"id":648,"isOneTrustButton":91},"cookie preferences","ot-sdk-btn",{"items":650},[651,653,655],{"text":635,"config":652},{"href":637,"dataGaName":638,"dataGaLocation":459},{"text":640,"config":654},{"href":642,"dataGaName":643,"dataGaLocation":459},{"text":645,"config":656},{"dataGaName":647,"dataGaLocation":459,"id":648,"isOneTrustButton":91},"content:shared:en-us:main-footer.yml","Main 
Footer","shared/en-us/main-footer.yml","shared/en-us/main-footer",{"allPosts":662,"featuredPost":949,"totalPagesCount":971,"initialPosts":972},[663,690,709,730,751,773,793,812,833,853,873,892,911,930],{"_path":664,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":665,"content":673,"config":683,"_id":686,"_type":14,"title":687,"_source":16,"_file":688,"_stem":689,"_extension":19},"/en-us/blog/devsecops-security-automation",{"title":666,"description":667,"ogTitle":666,"ogDescription":667,"noIndex":6,"ogImage":668,"ogUrl":669,"ogSiteName":670,"ogType":671,"canonicalUrls":669,"schema":672},"Automated security testing for DevSecOps","We share four fool-proof ways to bring your security automation to the next level and five reasons why it's critical.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662504/Blog/Hero%20Images/devsecops-automated-security.jpg","https://about.gitlab.com/blog/devsecops-security-automation","https://about.gitlab.com","article","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Automated security testing for DevSecOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vanessa Wegner\"}],\n        \"datePublished\": \"2020-07-08\",\n      }",{"title":666,"description":667,"authors":674,"heroImage":668,"date":676,"body":677,"category":678,"tags":679},[675],"Vanessa Wegner","2020-07-08","\n\n_This is the third in our five-part series on getting started with [DevSecOps](/topics/devsecops/). Part one gives you nine ways to [shift security left](/blog/efficient-devsecops-nine-tips-shift-left/). Part two outlines the steps needed to create [silo-free collaboration](/blog/achieve-devsecops-collaboration/)._\n\nNearly 83% of developers in [GitLab’s 2020 DevSecOps survey](/developer-survey/) say they’re releasing code faster today than ever before thanks to [DevOps](/topics/devops/). 
About 65% also say security is shifting left in their organizations. How far left is that shift? Not that far: Over 60% of developers don’t actually run static [application security](/topics/devsecops/) testing (SAST) scans, and 73% don’t conduct dynamic application security testing (DAST) scans.\n\nThis needs to change.\n\nSecurity is often a bottleneck to faster releases but it is much too risky to minimize or ignore. DevSecOps promises to bring security  forward in the software development lifecycle (SDLC). This can be done a number of ways but automated security testing streamlines adoption and scalability. A respondent to this year’s DevSecOps Survey summarized it nicely:\n\n> Automated testing and continuous integration have made our deployments safer and more optimized. Now everyone in the team has the permission to deploy the code.\n\n## The need for security automation and good security practices\n\nThere is an attempted cyber-attack [every 44 seconds](https://us.norton.com/blog/emerging-threats/cybersecurity-statistics#:~:text=There%20isn't%20concise%20data,people%20being%20hacked%20per%20year.) on average. \n\n_Every. 44. Seconds._ \n\nThis also equates to approximately 2,200 daily attacks resulting in about 800,000 people being hacked each year. Unfortunately, no one has the time, patience, or bandwidth to keep their eyes and hands ready to stop or address cyber attacks on the horizon. That’s why security automation tools exist.\n\nAnd consider this: cyber attackers aren’t doing everything by hand – they employ automation too. This means security processes also [need automation to keep up](https://www.checkpoint.com/cyber-hub/cyber-security/security-automation/#:~:text=Security%20automation%20is%20the%20automation,scale%20to%20handle%20growing%20workloads.). \n\nA security automation solution can include real-time monitoring tools that constantly manage security vulnerabilities and take automatic action where needed. 
It’s like adding a second pair of invisible hands to the team to help prevent and resolve security issues. Increased security measures can save any organization time and money and avoid the loss of sensitive files. \n\n\n## 4 Ways to automate security in software development\n\n[Automation](https://docs.gitlab.com/ee/topics/autodevops/) comes in all shapes and sizes. Scans and policies can be programmed manually or come as set operations out of the box; scans can be triggered automatically at code commit or manually initiated; and these scans can result in automated remediation and reports or they can require human intervention. Here are four ways automated security testing can be integrated into your software development practices:\n\n1. Automate security scans for every code change by [running SAST scans](https://docs.gitlab.com/ee/user/application_security/sast/index.html). For ease of assessment, results should be sorted by the priority level of the vulnerability.\n\n1. Scan results should automatically initiate a work ticket or issue, or may stop a build depending on the policy in place. These results should be presented to the developer – in the workspace or IDE in use to avoid context switching – for instant remediation.\n\n1. Policies are automatically applied upon code commit with the option to capture and approve exceptions as needed.\n\n1. Analyze running web applications for known vulnerabilities [using DAST scans](https://docs.gitlab.com/ee/user/application_security/dast/). 
In GitLab, DAST scans can be automated by [including the CI job in your existing .gitlab-ci.yml file](https://docs.gitlab.com/ee/user/application_security/dast/#configuration), or by [using Auto DAST](https://docs.gitlab.com/ee/topics/autodevops/stages.html#auto-dast).\n\n\n\n## 5 Benefits of automated security\n\nIn addition to making jobs easier across development, security, and operations, automated security testing will help your team produce a safer and better-quality result.\n\n1. **Reduced human error.** Across all functions, automation reduces human error by taking the manual work out of tedious processes that rely on excessive attention to detail.\n\n1. **Early security intervention.** By placing security earlier in the SDLC, threats and vulnerabilities can be detected and addressed faster – hopefully before there’s even a chance that they’re exposed.\n\n1. **Streamlined vulnerability triage.** Automated scan reports can present the threat level of any vulnerability so that developers and security engineers alike can decide which must be addressed immediately and who is responsible for resolving the problem.\n\n1. **Repeatable security checks.** Any automated task should be repeatable, which means that all code can be reviewed and assessed the same way every time. This creates a trusted and secure environment and code base, and also helps reviewers identify patterns when results are presented in a consistent manner.\n\n1. **Responsibility clarification.** Automation takes uncertainty out of DevSecOps. Shifting security can cause confusion about who is responsible for what. But automated scans can present remediation options for the party responsible _at that stage of development_.\n\nBut it is also important to find a productive balance between automated security testing and manual work. 
For example, trying to automate overly rigorous policies may prove detrimental to business objectives and may not be realistically achieved – it’s important to find a balance between policy compliance and efficiency. It’s also key that automation doesn’t obstruct visibility. Make sure there is still a trail of operations to review if necessary – automated processes should still generate reports of what was done, when, and why the action was triggered. Last, but certainly not least: Automation is **not** meant to replace human beings. It is a tool meant to make their work more efficient and help them produce better results for the team, the business, and the customer.\n\n## Security automation vs. security orchestration\n\nThough they are different concepts, security automation and security orchestration perform similar functions. One serves the other to make security processes more efficient. \n\nSecurity automation focuses on automating individual tasks (possibly with AI technology) to simplify essential processes for security analysts. On the flip side, security orchestration connects tools in use alongside automation and streamlines the whole security procedure. Orchestration drives efficient automation.\n\n## Types of security automation tools\n\nTo keep track of security incidents (and prevent them in the future), teams use security automation tools and different types of security scanning. A few common types of security automation tools include:\n\n- Security Information and Event Management (SIEM): SIEMs help to automatically collect data across multiple sources and use it to give contextual background about security incidents.\n- Security Orchestration, Automation, and Response (SOAR): SOAR takes SIEM a step further than just contextual data collection and adds automated response options to the mix. SOAR alerts security analysts to problems and shuts down cyber threats automatically. 
\n- Extended Detection and Response (XDR): This proactive, automated solution combines SIEM, SOAR, and other security options into one managed source.\n\n## How security automation works with security analysts\n\nA human can’t do all of the necessary security work, nor can a security automation tool. It’s a symbiotic relationship to ensure that an organization feels the least amount of negative impact from a cyber attack possible. \n\nA security analyst, responsible for vulnerability management by identifying and resolving security flaws and conducting [audits](https://about.gitlab.com/blog/what-you-need-to-know-about-devops-audits/), gets a lot of help from automation. An automated security system can make someone aware of a problem and even help to resolve it while removing manual time constraints.\n\n**Read more about DevSecOps:**\n* [Efficient DevSecOps: 9 tips for shifting left](https://about.gitlab.com/blog/efficient-devsecops-nine-tips-shift-left/)\n* [Want better DevSecOps? Try cross-functional collaboration](https://about.gitlab.com/blog/achieve-devsecops-collaboration/)\n* [Compliance made easy with GitLab](https://about.gitlab.com/blog/compliance-made-easy/)\n* [How application security engineers can use GitLab to secure their projects](https://about.gitlab.com/blog/secure-stage-for-appsec/)\n\nCover image by [Daniele Levis Pelusi](https://unsplash.com/@yogidan2012) on [Unsplash](https://unsplash.com/photos/Pp9qkEV_xPk)\n{: .note}\n\n\n\n","insights",[680,681,682,9],"DevOps","security","workflow",{"slug":684,"featured":6,"template":685},"devsecops-security-automation","BlogPost","content:en-us:blog:devsecops-security-automation.yml","Devsecops Security 
Automation","en-us/blog/devsecops-security-automation.yml","en-us/blog/devsecops-security-automation",{"_path":691,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":692,"content":698,"config":703,"_id":705,"_type":14,"title":706,"_source":16,"_file":707,"_stem":708,"_extension":19},"/en-us/blog/devsecops-security-standardization",{"title":693,"description":694,"ogTitle":693,"ogDescription":694,"noIndex":6,"ogImage":695,"ogUrl":696,"ogSiteName":670,"ogType":671,"canonicalUrls":696,"schema":697},"DevSecOps basics: 5 steps to standardize (and then scale) security","DevSecOps is incomplete without speed and scale. Standardize security to make it happen.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663613/Blog/Hero%20Images/devsecops-security-standardization.jpg","https://about.gitlab.com/blog/devsecops-security-standardization","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"DevSecOps basics: 5 steps to standardize (and then scale) security\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vanessa Wegner\"}],\n        \"datePublished\": \"2020-07-20\",\n      }",{"title":693,"description":694,"authors":699,"heroImage":695,"date":700,"body":701,"category":678,"tags":702},[675],"2020-07-20","\n_This is the fifth in our five-part series on [DevSecOps](/topics/devsecops/) basics. Part one offers nine tips to truly [shift left](https://about.gitlab.com/blog/efficient-devsecops-nine-tips-shift-left/). Part two outlines the steps needed to create [silo-free collaboration](https://about.gitlab.com/blog/achieve-devsecops-collaboration/). Part three looks at the importance of [automated security testing](https://about.gitlab.com/blog/devsecops-security-automation/). 
And part four details how to create a [strong security culture](https://about.gitlab.com/blog/security-culture-devsecops/)._\n\nStandardizing security policies comes in a variety of forms: regulatory compliance, access controls, acceptable use policies, security as code, and automation, to name a few. Ultimately, the idea is that your security team knows exactly what policies and methods have been used or applied to each project. \n\nThe goals of standardization are consistency, traceability, and repeatability. By consistently using the same security methods across all work, security knows what has been protected and what hasn’t. This helps them apply additional measures where necessary, and makes them aware of any needed exceptions. Ensuring that security methods are repeatable helps to expand adoption and scale security to the entire organization or enterprise. \n\n## Building a standardized security program\n\nA holistic security program should be composed of different levels of policies and compliance. Some policies should be company-wide, such as an [acceptable use policy](https://whatis.techtarget.com/definition/acceptable-use-policy-AUP), some will fulfill regulations like the [GDPR](https://gdpr-info.eu/) or [CCPA](https://oag.ca.gov/privacy/ccpa), and some will be specific to certain organizations within your business. \n\n### Standardizing security in DevOps\n\n[DevSecOps can be executed sustainably](/solutions/security-compliance/) at scale with standardized security practices. Here are five ways to standardize security across all of your development projects.\n\n#### Educate\n\nProvide security training and education to every employee. Companywide security initiatives [help to build a security culture](https://about.gitlab.com/blog/security-culture-devsecops/) and empower employees to take responsibility for security in their own work. 
Standardized training also spreads awareness of mandatory policies and alerts employees to the actions taken to both secure day-to-day operations and protect their customers. \n\n#### Coordinate\n\nCoordinate a predefined set of security requirements among dev, sec, and ops that can be coded into your pipeline and applied to every project. These can ensure regulatory compliance, foster secure coding practices, trigger red flags or notifications, and educate employees on security best practices.\n\n#### Authenticate\n\nAccess controls are a critical component of any security framework, and should be continually monitored and evaluated. By keeping close tabs on who needs access to what, you’re able to build a solid wall around your most critical processes and assets. This eliminates unnecessary access to sensitive data, and helps streamline tracing, recovery, and remediation efforts when something goes wrong. Access control policies also help defend your business by enhancing authentication requirements.\n\n#### Integrate\n\nEmbed scan and test tools within your development pipeline. Static and dynamic application security testing (SAST and DAST, respectively) can be set to run at every code commit and in the review app. Other tools and tests include IAST, fuzzing, licence compliance, container scanning, and dependency scanning (among others). Embedding tools directly into the pipeline allows you to know exactly what the code has been evaluated for, and also what the code has not been checked for. \n\n#### Automate\n\nIn DevSecOps, automation is the true key to standardized security practices as it allows for fast, secure development at scale. There are a number of ways to automate security within and around your development pipeline – the trick is to find an appropriate balance between automation and manual intervention. 
Automated policies should serve as guardrails that guide development smoothly from one security check to the next, but they should also allow for exceptions when needed. These guardrails should automatically generate reports from code scans and consolidate them into a [security dashboard](https://docs.gitlab.com/ee/user/application_security/security_dashboard/) for review. This helps to minimize human error and any false positives or negatives, allows for consistent vulnerability reporting, and can be used to measure a team’s performance against secure coding expectations. Automation also helps to prevent overly complex security programs by reducing ad-hoc policies and redundant work.\n\n## The best security programs will change\n\nSecurity will never be a set-it-and-forget-it practice. The threat landscape is constantly changing, external regulations will continue to evolve, and internal business requirements will always keep you on your toes. While setting standards for security will help your team manage the workload, these standards need to be constantly re-evaluated and updated. Outdated security practices will undermine even the most solid programs, so it’s important to use part of the time saved from standardizing and automating to plan for the future. \n\n_How efficient are your DevSecOps practices? 
[Take our DevSecOps Maturity Assessment to find out.](https://about.gitlab.com/resources/devsecops-methodology-assessment/)_\n\n**Learn more about DevSecOps:**\n* [Case Study: How Jasper Solutions offers “DevSecOps in a box” with GitLab”](https://about.gitlab.com/customers/jasper-solutions/)\n* [How to capitalize on GitLab Security tools with external CI](https://docs.gitlab.com/ee/integration/jenkins.html)\n* [How to overcome toolchain security challenges with GitLab](https://about.gitlab.com/blog/toolchain-security-with-gitlab/)\n\nCover image by [Andrew Ridley](https://unsplash.com/@aridley88) on [Unsplash](https://unsplash.com/photos/jR4Zf-riEjI)\n{: .note}\n",[680,681,682,9],{"slug":704,"featured":6,"template":685},"devsecops-security-standardization","content:en-us:blog:devsecops-security-standardization.yml","Devsecops Security Standardization","en-us/blog/devsecops-security-standardization.yml","en-us/blog/devsecops-security-standardization",{"_path":710,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":711,"content":717,"config":724,"_id":726,"_type":14,"title":727,"_source":16,"_file":728,"_stem":729,"_extension":19},"/en-us/blog/evolution-of-zero-trust",{"title":712,"description":713,"ogTitle":712,"ogDescription":713,"noIndex":6,"ogImage":714,"ogUrl":715,"ogSiteName":670,"ogType":671,"canonicalUrls":715,"schema":716},"The evolution of Zero Trust","Zero Trust may be one of the hottest topics in security today, but it's not exactly new. 
Here's a history.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664096/Blog/Hero%20Images/evolution-of-zero-trust.jpg","https://about.gitlab.com/blog/evolution-of-zero-trust","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The evolution of Zero Trust\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mark Loveless\"}],\n        \"datePublished\": \"2019-04-01\",\n      }",{"title":712,"description":713,"authors":718,"heroImage":714,"date":720,"body":721,"category":681,"tags":722},[719],"Mark Loveless","2019-04-01","\nUpdate: This is part 1 of an ongoing [Zero Trust series](/blog/tags.html#zero-trust). See our next post: [Zero Trust at GitLab: Problems, goals, and coming challenges](/blog/zero-trust-at-gitlab-problems-goals-challenges/).\n{: .alert .alert-info .note}\n\nI was not at the 2019 [RSA Conference](https://about.gitlab.com/events/rsa/) this year, so I asked my friends and colleagues what it was like and if they enjoyed themselves.\nNearly every person mentioned the phrase \"Zero Trust Networking\" during their recounting of events, and the vast majority of them seemed worn down with the phrase by the end of the conference.\nSeveral mentioned it was the \"hot topic\" – the term ‘Zero Trust’ actually made the RSAC Buzzwords Top 3 list.\nI have a few thoughts on the subject, because it is a solid way to move forward in the security realm, but I also wanted to remind people that this is not some new thing that came up this year – this is a concept whose roots stretch back a few decades.\nI also wanted to point out that Zero Trust will not end attacks, as attacks never end.\n\nThis is the first of a series of blog posts from the security team here at GitLab explaining Zero Trust and how we are tackling it.\nBut for these discussions to make sense, we need to show some perspective, so first, a bit of a history lesson.\nThere were three major shifts that 
brought about Zero Trust, all building upon each other.\nLet’s cover them, one by one.\n\n## First shift: Death of the perimeter\n\nBack in the early days of the internet, if you wanted to attack a target network, you would do a bit of reconnaissance and discover things like hostnames and IP ranges.\nYou would probe, find the available services on these target hosts, then begin trying to compromise them.\nThis was because the individual host systems were fairly wide open.\nSystem administrators needed a way to limit access to the servers and workstations under their control, while allowing legitimate access to users. Remote workers were rare, as the bulk of users were in an office building together.\nSo the [network firewall](https://en.wikipedia.org/wiki/Firewall_(computing)) was born in the early 1990s, restricting access between an organization’s internal network and the internet.\n\nAttackers were accustomed to [port scanning](https://en.wikipedia.org/wiki/Port_scanner) the target, finding the various services, and taking their pick of which service to attack.\nTo adapt to the newly installed firewall, attackers began to focus on the services that were allowed through the firewall. 
Back then, [organizations still controlled their own servers](https://en.wikipedia.org/wiki/DMZ_(computing)), running things like DNS, email, and web services.\nThese types of common services required holes be punched in the firewall to allow legitimate traffic to them, and so the attackers simply came in with the legitimate traffic.\n\nAt the same time, desktop operating systems and corporate applications began to move toward interacting and sharing information with each other, and as system administrators felt a level of control with the firewall, no one really pushed back very hard against these various operating systems and their noisy applications.\nIn fact, using those same firewall rules, it was possible to allow customers, business partners, and vendors a bit more access to the precious internal network by creating large holes to allow the access.\nThis meant if the attacker could figure out who your trusted partners were, they could compromise them and then come in through the large hole created for those same trusted partners.\n\n>This meant if the attacker could figure out who your trusted partners were, they could compromise them and then come in through the large hole created for those same trusted partners.\n\nIt became common knowledge that once an attacker got a foothold into that internal network, it was usually quite easy to move about within the organization.\nThe attackers adapted. 
The firewall lost a lot of its value, and to many attackers it became meaningless.\n\nI remember meeting [Bill Cheswick](https://en.wikipedia.org/wiki/William_Cheswick) (one of those early pioneers that helped bring about the firewall) at a security conference, and I was able to corner him and talk shop.\nSomething both of us gravitated towards was this concept of how the infamous \"network perimeter\" was basically an illusion.\nIt _could_ work, but not without changing a serious amount of tech to make it happen.\nHow did each of us secure our respective home systems?\n[Hardening each system individually](https://en.wikipedia.org/wiki/Bastion_host), and just eliminating the concept of the perimeter.\nSure, we both kept a perimeter, but it was maintained with a few router rules, and was more like a white picket fence than a castle wall. To us, the network perimeter was dead.\n\n>Sure, we kept a perimeter, but it was more like a white picket fence than a castle wall. To us, the network perimeter was dead.\n\nThis was a common topic among security practitioners and network administrators at the time, all of us discussing and arguing the fine points the same way Cheswick and I did.\nWe needed some way to deal with the attacker since the perimeter was dead or dying.\nThe concept of Zero Trust networking was born.\nThis started as rumblings during the early 2000s and came into an actual concept of sorts through the [Jericho Forums](https://en.wikipedia.org/wiki/Jericho_Forum) in 2004, and by 2010 or so it even had a name.\nBut I am getting ahead of myself. 
Other things were happening.\n\n## Second shift: The cloud\n\nGetting [slashdotted](https://en.wikipedia.org/wiki/Slashdot_effect).\n[Distributed denial of service attacks](https://en.wikipedia.org/wiki/Denial-of-service_attack).\nJust not having the bandwidth on your internet-connected web server in your data center to handle the traffic.\nThis internet thing was really taking off, and the World Wide Web was driving it.\n A few companies figured out clever ways to provide server services for organizations all over the globe, and were known as [Content Distribution Networks](https://en.wikipedia.org/wiki/Content_delivery_network) (CDNs), and CDNs gave these organizations a way to upload web content to these servers.\n Even though content might be replicated across the CDN’s dozens of data centers world wide, it was one single entity as far as a typical website visitor was concerned.\n\nNot only could you upload your corporate web server to the CDNs, after a while you could basically pay for virtual servers that you could use for any purpose.\nAs web servers developed and web apps become more ambitious, some companies offered up their services to other companies, some even broke out of the \"web app\" mold and began to offer robust services that replaced desktop applications.\n\n### [The cloud](https://en.wikipedia.org/wiki/Cloud_computing) had arrived.\n\nNot everyone liked the cloud, in fact many organizations were quite resistant to it at first. Others immediately saw the value in it and moved everything to the cloud.\n\nAttackers did what they did best: they adapted. 
People new to the cloud would often get permissions wrong and expose sensitive data.\nAny bad coding practices they had before the cloud were just uploaded anyway as the cloud didn’t magically fix bugs.\nMoving poorly-coded services in the cloud meant even more holes in firewalls if old legacy data was still stored “on prem”.\nHowever, more often than not it meant these services and the insecure methods used to reach its data was simply moved up to the cloud, sometimes with even more exposure.\nAttackers got to know how these new technologies worked and understood the flaws that existed in the implementations and kept on compromising systems.\n\nWhile the cloud shift created its fair share of upheaval,  it certainly set the stage for the third major shift.\n\n## Third shift: Mobility\n\nWorking remotely? We'd had dial up networking via modem at first, followed by the infamous VPN.\nAs one might imagine, this was an obvious one that certainly bypassed a firewall on a network perimeter. Knowing usernames and passwords had always been a goal of attackers, and if they managed to obtain that information they could certainly plug it into a VPN for access.\n\n### To help protect the username and password, [Two Factor Authentication](https://en.wikipedia.org/wiki/Multi-factor_authentication) (2FA) came about.\n\nThe infamous RSA token was technology I encountered ages ago, and it was certainly all the rage during the first decade of this century.\nMy first encounter was when using a VPN in the late '90s.\nA decade later when I worked for MITRE, I carried no fewer than four RSA tokens (not unheard of at the time for many organizations!) 
for not just remote access, but for special access to projects funded by different government agencies.\nYou were outside that perimeter and needed in, but as users and their passwords were considered a security risk for any number of reasons (poor password hygiene, easily-fooled help desk personnel responsible for resets, etc.), this direct and open exposure of the internal network via the VPN was too insecure.\nSomething you know (the password) and something you have (that RSA token with its changing six-digit number) made it way more difficult for attackers to get in.\n\nOver 20 years ago, everyone had a desktop machine, but those road warriors that travelled for business would be issued a second system – a laptop.\nThis shifted as it made sense to give all of the employees laptops, and the more expensive desktop systems were only issued by those doing specific jobs that required the extra desktop horsepower.\n\nThe phone also helped push forward the mobility concept, as it expanded from a telephone with internet access to a small internet-connected computer loaded with cloud-based apps that also works as a telephone.\n\n### We became mobile.\n\nEither through SMS messaging, an \"authentication app\" that did TOTP, or a full-fledged 2FA app that supported push technology, the phone became the \"something you have\" and essentially killed the old RSA token.\nAnd of course something else happened with all this mobility, it increased the ability for one to work from anywhere.\nMost of those \"Whatever as a Service\" apps were using web-based protocols to communicate to their Cloud presence, and we'd figured out how to log a person in and do 2FA ages ago.\nThere was no need for a perimeter for the basic end user in an organization.\n\nThis was a slow build to a large upheaval in information security.\nBut what really drove home the big security issues of this brave new world was an event.\nThe culmination of our three major shifts – a teaching moment, as they 
say.\n\n## The big teaching moment\n\nWhat was the big teaching moment?\n\n### The obvious answer everyone talks about is [Operation Aurora](https://en.wikipedia.org/wiki/Operation_Aurora).\n\nThis was the breach at Google that got them to take a look at this whole Zero Trust thing, build their version of it called [BeyondCorp](https://cloud.google.com/beyondcorp/), and begin to implement it internally.\nIn 2014 Google began to publish information about it.\n\nGoogle had been targeted by [PLA Unit 61398](https://en.wikipedia.org/wiki/PLA_Unit_61398).\nI recognized PLA Unit 61398 from my defense contractor days as “Comment Crew,” as one of their backdoor programs that would make innocent-looking web queries to a Comment-Crew-controlled web server, and obfuscated comments in the HTML returned to the backdoor were actually commands for the backdoor to carry out.\nThey targeted a lot of organizations from large corporations to defense contractors to U.S. government agencies.\n\nThe press at the time had a lot of quotes from security experts pooh-poohing the whole [Advanced Persistent Threat](https://www.fireeye.com/current-threats/apt-groups.html) (APT) thing, claiming that APT attacks weren’t sophisticated as the \"advanced\" part of APT implied.\nHowever, most of these people had either never played offense, or they didn't deal with APT as a part of daily life.\nI distinctly remember the Google attack because during that same timeframe, Comment Crew’s attack was repeated against my employer and others. 
We were not breached in that case and we probably called it “a typical Tuesday,” but many naysayers in the security community finally had to admit that APT was in fact real.\n\n### But a _huge_ teaching moment was the [RSA hack in 2011](https://www.wired.com/2011/08/how-rsa-got-hacked/).\n\nAgain, maybe not the most sophisticated of attacks to gain entry ([phishing](https://en.wikipedia.org/wiki/Phishing) email), but it was just enough to gain a foothold.\nOnce inside, they pivoted and managed to compromise RSA in what was one of the worst ways possible.\nPeople argue about exactly what level of compromise they achieved, but in the end the attackers could program up their own tokens to allow bypass of RSA SecurID implementations at RSA customer locations.\n\nOne important point to make here – 2FA was an extremely important protection mechanism for organizations like the U.S. Government and all of its many defense contractors.\nAPT actors targeted things like documents pertaining to research, plans involving various defense technologies, and credentials for regaining access if their intrusion was discovered and the APT actors were shut out.\nSince those credentials were protected by 2FA via RSA SecurID tokens, complete panic ensued. _Millions_ of tokens had to be manufactured, provisioned, and deployed to customers who had to configure their systems and deploy them internally.\nDuring this time all organizations still had to function, and APT-sponsored attacks against targets that took advantage of the stolen RSA technology began to appear.\n\nThe basic corporate network at the time was still mainly perimeter-based, even though their perimeter was full of holes, allowing everything from remote users to trusted vendors, partners, and customers.\n\n> The cloud was there, but many companies had their feet in both worlds.\n\nThe cloud was there, but many companies had their feet in both worlds. 
They would often make architectural choices on technology based upon getting systems to just talk to each other and allow data access _without_ fully considering security issues.\nThe user population was increasingly mobile and, by its very nature, was pushing solutions to the absolute limit.\nAnd now, the one thing that at least protected access to it all – a layered security approach to credentials – was compromised.\n\n## Enter Zero Trust\n\nBeyondCorp was Google’s answer to the threat they faced – a sophisticated adversary that took advantage of their employees and gained privileged access to sensitive assets.\nGoogle published a lot of the material they developed, thinking it would help others deal with the same situation.\nFor those of us in the more threatened world of government agencies and government contractors, we didn’t give Google’s BeyondCorp a second thought.\nWe had defenses, we’d learned how to deal with these type of attackers, we’d even dealt with Comment Crew ourselves and could keep them at bay.\n\nThe RSA breach was a different scenario. 
An area of trust – 2FA – was completely compromised.\nRSA didn’t run out and build BeyondCorp, but it certainly inspired a large number of people to start looking for answers, and Zero Trust really began to check many of the boxes to add in the protections we needed.\nIn essence, the RSA event gave us a reason to implement Zero Trust.\nWe needed more than 2FA, more than inventory control, more than patch management, we needed to be able to establish a trusted environment and could not with the way things were.\n\n### Essentially, it boils down to this: Zero Trust assumes you do not trust the user nor the user’s device.\n\nThe user has to prove that they are who they say they are and that they meet policy requirements to perform the actions they are wanting to perform.\nThe device has to prove that it is what is says it is, including patch levels.\nEven automated processes such as systems that communicate between each other have to prove themselves as well.\nThe transaction should be valid and the processes are allowed to perform the actions they are performing.\nThis means any information in transition needs to be encrypted using secure algorithms, all transactions are signed and signatures validated, and there is a secure audit trail to ensure all parts of the operation can be examined.\n\n### Are we there yet with Zero Trust?\n\nNo. In fact, the hard part isn’t so much the implementation of it, it is getting it implemented everywhere. 
Most Zero Trust solutions address a lot of the concerns of the past, but they are not perfect by any means.\nMany organizations will be living in “mixed” environments of old and new for quite a while.\nThe applications that implement the raw components of Zero Trust need to be secure.\nThere will be various policy decisions on how to act on various accesses and requests involving users, devices, services, and data that if not properly defined could result in the wrong employee gaining access to sensitive material.\nAnd of course we will always face a clever adversary trying to bypass, break, and compromise whatever security controls are put in place.\n\nAt least with Zero Trust, we have a leg up. In the forthcoming [series of blog posts](/blog/tags.html#zero-trust), we’ll share GitLab’s story with Zero Trust.\nGitLab is a cloud native, all-remote company with employees from more than 50 countries.\nWe also strive to be as open as we can be about how we work.\n\nWe invite you to follow our journey and contribute your thoughts, questions and experiences around Zero Trust along the way.\n\nPhoto by [Matthew Henry](https://unsplash.com/photos/fPxOowbR6ls?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/security?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[723,681,9],"inside GitLab",{"slug":725,"featured":6,"template":685},"evolution-of-zero-trust","content:en-us:blog:evolution-of-zero-trust.yml","Evolution Of Zero 
Trust","en-us/blog/evolution-of-zero-trust.yml","en-us/blog/evolution-of-zero-trust",{"_path":731,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":732,"content":738,"config":745,"_id":747,"_type":14,"title":748,"_source":16,"_file":749,"_stem":750,"_extension":19},"/en-us/blog/happy-birthday-secure-by-design",{"title":733,"description":734,"ogTitle":733,"ogDescription":734,"noIndex":6,"ogImage":735,"ogUrl":736,"ogSiteName":670,"ogType":671,"canonicalUrls":736,"schema":737},"Happy birthday, Secure by Design!","The U.S. government's initiative to ensure greater security in software products turns one. Find out what GitLab has done to align with this critical effort.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664530/Blog/Hero%20Images/AdobeStock_282096522.jpg","https://about.gitlab.com/blog/happy-birthday-secure-by-design","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Happy birthday, Secure by Design!\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Joel Krooswyk\"}],\n        \"datePublished\": \"2024-04-30\",\n      }",{"title":733,"description":734,"authors":739,"heroImage":735,"date":741,"body":742,"category":681,"tags":743},[740],"Joel Krooswyk","2024-04-30","When the Cybersecurity and Infrastructure Security Agency (CISA) first published its [Secure by Design](https://www.cisa.gov/securebydesign) software protection initiative on April 13, 2023, the industry paid close attention. The initiative urges all software manufacturers to take the steps necessary to ensure that the products they ship are, in fact, secure by design. At GitLab, we quickly assessed our alignment with the initiative and over the past year have continued to innovate in accordance with CISA's guidelines.\n\nCISA's Secure by Design introduced three software security principles:\n\n1. Take ownership of customer security outcomes.\n\n2. 
Embrace radical transparency and accountability.\n\n3. Build organizational structure and leadership to achieve these goals.\n\n## A year of government guidance  \n\nThe U.S. government has produced significant guidance throughout the past year that reflects the Secure by Design theme. Here are just a few highlights:\n\n* August 2023: ONCD in partnership with several other agencies kicked off the [OS3i Initiative](https://www.whitehouse.gov/oncd/briefing-room/2023/08/10/fact-sheet-office-of-the-national-cyber-director-requests-public-comment-on-open-source-software-security-and-memory-safe-programming-languages/) to prioritize focus areas related to open source software security.\n* August 2023: NIST produced [SP 800-204D ](https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-204D.pdf)to provide practical software supply chain security strategies for DevSecOps CI/CD pipelines.\n* October 2023: CISA released a second iteration of the [Secure by Design](https://www.cisa.gov/sites/default/files/2023-10/SecureByDesign_1025_508c.pdf) document.\n* October 2023: The [AI Executive Order](https://www.whitehouse.gov/briefing-room/statements-releases/2023/10/30/fact-sheet-president-biden-issues-executive-order-on-safe-secure-and-trustworthy-artificial-intelligence/) was issued by the Biden Administration. 
Since then, AI engagement guidelines have been produced by most government agencies.\n* December 2023: CISA produced [Memory Safe Roadmap guidance](https://www.cisa.gov/sites/default/files/2023-12/The-Case-for-Memory-Safe-Roadmaps-508c.pdf).\n* February 2024: NIST released the [CyberSecurity Framework 2.0](https://www.nist.gov/news-events/news/2024/02/nist-releases-version-20-landmark-cybersecurity-framework).\n* March 2024: CISA and OMB published the [Secure Software Development Attestation Form](https://www.cisa.gov/secure-software-attestation-form) and opened a [repository](https://www.cisa.gov/news-events/news/cisa-publishes-repository-software-attestation-and-artifacts) for collection of the attestations.\n\n## How GitLab has evolved with the Secure by Design initiative\n\nGitLab has also continued to grow in alignment with the Secure by Design initiative over the past year. Here are some examples.\n\n### GitLab signed the Secure by Design Pledge\n\nGitLab is proud to have signed the CISA [Secure by Design Pledge](https://www.cisa.gov/securebydesign/pledge).\n\n\"The Secure by Design concepts are well-aligned with GitLab's core values. As the most comprehensive AI-powered DevSecOps platform, GitLab offers its unwavering support towards CISA’s efforts to instill a Secure by Design mindset in software manufacturers. GitLab is proud to make the Secure by Design Pledge, and we firmly believe these efforts will help us enable everyone to innovate and succeed on a safe, secure, and trusted DevSecOps platform,\" said GitLab Chief Information Security Officer Josh Lemos.\n\n### \"Secure by default\" practices\n\nConfiguring and securing installations and users can be a challenge. GitLab developed granular user access with [custom user roles](https://docs.gitlab.com/ee/user/custom_roles.html) and [customizable permissions](https://docs.gitlab.com/ee/user/custom_roles/abilities.html). 
Management of [tokens](https://docs.gitlab.com/ee/security/token_overview.html), [API service accounts](https://docs.gitlab.com/ee/user/profile/service_accounts.html), and [credentials](https://docs.gitlab.com/ee/administration/credentials_inventory.html) have been in focus with continuous improvements and more rigorous authentication security capabilities throughout the year. \n\n### Secure software development practices\n\nWith every release, GitLab has incrementally enhanced scanning accuracy, coverage, and capabilities across our entire suite of security analyzers.\n\n- Some [scan results are presented in developer context](https://docs.gitlab.com/ee/user/application_security/#gitlab-workflow-extension-for-vs-code) (like the IDE) simplify workflows and shift security further left.\n\n- [CI/CD pipeline](https://docs.gitlab.com/ee/ci/pipelines/) capabilities, which have been expanded and simplified, ensure better functionality while also bolstering security and compliance with enforcement and policies.\n\n- [Vulnerability management](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/) provides better views at scale, improved filtering, and more options to take action against vulnerability findings.\n\n- [Artifact attestations](https://docs.gitlab.com/ee/ci/yaml/signing_examples.html) provide a trustworthy authentication of each software artifact.\n\n### Secure business practices\n\nEach GitLab release demonstrated increased focus on compliance. Enhanced [auditing](https://docs.gitlab.com/ee/administration/audit_event_reports.html) and [event streaming](https://docs.gitlab.com/ee/administration/audit_event_streaming/) provide accountability across the entire SDLC. 
Compliance teams are now better equipped to proactively align to requirements, thanks to increased [policy management](https://docs.gitlab.com/ee/administration/compliance.html#policy-management), [workflow automation](https://docs.gitlab.com/ee/administration/compliance.html#compliant-workflow-automation), visibility via [compliance reporting](https://docs.gitlab.com/ee/user/compliance/compliance_center/), and [exportability of data](https://docs.gitlab.com/ee/user/compliance/compliance_center/compliance_standards_adherence_dashboard.html#export-compliance-standards-adherence-report-for-projects-in-a-group). \n\n## GitLab's Secure by Design features\n\nHere are some of the features and capabilities that align with Secure by Design.\n\n### SBOMs\n\nGitLab’s dynamic [software bill of materials](https://about.gitlab.com/blog/the-ultimate-guide-to-sboms/) focus improved SBOM generation while adding third-party SBOM intake capabilities. This also led to the ability to combine SBOMs, as well as to provide full attestation for standardized SBOM artifacts. Enhancements such as cross-project dependency visibility as well as dependency graphs enabled a better view of SBOM risk at scale. Continuous vulnerability scanning for SBOMs was also added during the past year, providing continuous insights for emergent risks for projects that are not under continuous development – no CI/CD pipeline required.\n\n### Vulnerability management\n\nNotable improvements can be seen in vulnerability management as GitLab product updates increased visibility to vulnerabilities at scale, added flexibility to [filtering](https://docs.gitlab.com/ee/user/application_security/vulnerability_report/#group-vulnerabilities), and added [remediation detail](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/#vulnerability-dismissal-reasons) options. 
With [GitLab Duo](https://about.gitlab.com/gitlab-duo/), our AI-powered suite of features, AI-assisted vulnerability remediation is taking a dramatic step forward.\n\n### AI-powered workflows\n\nSpeaking of AI, we deployed many [GitLab Duo features](https://about.gitlab.com/gitlab-duo/#features) during the past year that can help expedite Secure by Design execution, including:\n\n1. Code Suggestions - Use natural language processing to generate new code.\n2. Code Explanation - Discover what that uncommented code does in order to  properly maintain code bases and provide contextually aware product updates.\n3. Code Refactoring - Refactor legacy code bases into new libraries, functions, or memory-safe languages.\n4. Vulnerability Explanation - Understand the impact of a vulnerability and why it is creating risk to enable more accurate and thorough remediation.\n5. Vulnerability Resolution - Automatically resolve vulnerabilities to save significant amounts of time.\n6. Root Cause Analysis - Determine the root cause for a pipeline failure and failed CI/CD build.\n\n### Radical transparency\n\nGitLab continues to embrace its Transparency value by creating the [GitLab Trust Center](https://trust.gitlab.com/) and the [GitLab AI Transparency Center](https://about.gitlab.com/ai-transparency-center/). These public-facing pages provide radical transparency to GitLab's values, ethics, feature details, and compliance statements – including a [NIST Secure Software Development Framework](https://csrc.nist.gov/projects/ssdf) self-attestation letter.\n\n## What's next?\n\nAs Secure by Design enters its second year, we look forward to additional guidance and initiatives from CISA and other government agencies that will provide users around the world with more securely developed software.\n\n> Want to test-drive GitLab's security features? 
[Try GitLab Ultimate for free for 30 days](https://about.gitlab.com/free-trial/devsecops/).",[681,479,744,9,184],"DevSecOps",{"slug":746,"featured":91,"template":685},"happy-birthday-secure-by-design","content:en-us:blog:happy-birthday-secure-by-design.yml","Happy Birthday Secure By Design","en-us/blog/happy-birthday-secure-by-design.yml","en-us/blog/happy-birthday-secure-by-design",{"_path":752,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":753,"content":759,"config":767,"_id":769,"_type":14,"title":770,"_source":16,"_file":771,"_stem":772,"_extension":19},"/en-us/blog/migration-guide-github-advanced-security-to-gitlab-ultimate",{"title":754,"description":755,"ogTitle":754,"ogDescription":755,"noIndex":6,"ogImage":756,"ogUrl":757,"ogSiteName":670,"ogType":671,"canonicalUrls":757,"schema":758},"Migration guide: GitHub Advanced Security to GitLab Ultimate","Understand the similarities and differences between GitLab Ultimate and GitHub Advanced Security. Then follow this in-depth tutorial to make the move to the GitLab DevSecOps platform.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666187/Blog/Hero%20Images/blog-image-template-1800x945__6_.png","https://about.gitlab.com/blog/migration-guide-github-advanced-security-to-gitlab-ultimate","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Migration guide: GitHub Advanced Security to GitLab Ultimate\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2024-05-01\",\n      }",{"title":754,"description":755,"authors":760,"heroImage":756,"date":762,"body":763,"category":681,"tags":764},[761],"Fernando Diaz","2024-05-01","GitLab is the most comprehensive AI-powered DevSecOps platform, enabling organizations to deliver more secure software faster with one platform for your entire software delivery lifecycle. 
GitHub provides an Advanced Security add-on, which enables additional security features within GitHub. However, it lacks the depth and breadth of security features provided natively by GitLab. Organizations looking to migrate to GitLab Ultimate to enhance their security across all areas of the SDLC can use this guide to compare the two offerings and as a tutorial to move to the GitLab platform.\n\nThis article includes:\n\n- [A comparison between GitLab Ultimate and GitHub Advanced Security](#a-comparison-between-gitlab-ultimate-and-github-advanced-security)\n- [How to migrate a GitHub repository to GitLab](#how-to-migrate-a-github-repository-to-gitlab)\n- [How to migrate from GitHub Advanced Security to GitLab Ultimate feature-by-feature](#how-to-migrate-feature-by-feature)\n- [An introduction to additional GitLab Ultimate's security features](#additional-gitlab-ultimate-security-features)\n\n## A comparison between GitLab Ultimate and GitHub Advanced Security\n\n[GitLab Ultimate](https://about.gitlab.com/pricing/ultimate/) is GitLab's top subscription tier for enterprises looking to deliver secure software faster. 
GitHub Advanced Security is an add-on to GitHub Enterprise, which enables additional security features.\n\n### Similarities between GitLab Ultimate and GitHub Advanced Security\n\nGitLab Ultimate and GitHub Advanced Security both provide:\n- Static Application Security Testing ([SAST](https://docs.gitlab.com/ee/user/application_security/sast/)), secret scanning, and dependency scanning\n- contextual vulnerability intelligence and resolution advice\n- a list of dependencies or software bill of materials ([SBOM](https://about.gitlab.com/blog/the-ultimate-guide-to-sboms/))\n- security metrics and insights\n\n### Differences between GitLab Ultimate and GitHub Advanced Security\n\nGitLab Ultimate differs from GitHub Advanced Security in the following ways:\n\n- GitLab natively provides additional code scanners such as container scanning, Dynamic Application Security Testing ([DAST](https://docs.gitlab.com/ee/user/application_security/dast/)), Web API fuzz testing, and more. These scanners are a mix of optimized proprietary and open source technologies with custom rulesets. For a full list, see the [GitLab AppSec documentation](https://docs.gitlab.com/ee/user/application_security/secure_your_application.html).\n- GitLab provides [granular security guardrails](https://docs.gitlab.com/ee/user/application_security/policies/) to prevent insecure code from being merged without approval.\n- GitLab security scanners can be run in [air-gapped or limited-connectivity environments](https://docs.gitlab.com/ee/user/application_security/offline_deployments/).\n- GitLab provides the [Compliance Center](https://docs.gitlab.com/ee/user/compliance/compliance_center/), which enables oversight of compliance violations across an entire organization.\n\nGitLab Ultimate also provides additional security and compliance capabilities, portfolio and value stream management, live upgrade assistance, and more. 
See the [GitLab Ultimate documentation](https://about.gitlab.com/pricing/ultimate/) to learn more about these additional features.\n\n## How to migrate a GitHub repository to GitLab\n\nGitLab provides a built-in importer, which allows you to import your GitHub projects from either GitHub.com or GitHub Enterprise to GitLab. The importer allows you to migrate not only the GitHub Repository to GitLab, but several other objects, including issues, collaborators (members), and pull requests. For a complete list of what can be migrated, see the [GitHub imported data documentation](https://docs.gitlab.com/ee/user/project/import/github.html#imported-data). You can perform the migration as follows:\n1. On the left sidebar, at the top, select **Create new (+)**.\n2. Select **New project/repository** under the **In GitLab** section.\n3. Select **Import project**.\n\n![Import project selection](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674404/Blog/Content%20Images/1-Import-Project.png)\n\n4. Press the **GitHub** button.\n    - If using GitLab self-managed, then you must [enable the GitHub importer](https://docs.gitlab.com/ee/administration/settings/import_and_export_settings.html#configure-allowed-import-sources).\n    - Note that other importers can be initiated in the same way.\n5. Now, you can do one of the following:\n    - Authorize with GitHub Oauth by selecting **Authorize with GitHub**.\n    - Use a GitHub personal access token:\n       - Go to [https://github.com/settings/tokens/new](https://github.com/settings/tokens/new).\n       - In the **Note** field, enter a token description.\n       - Select the **repo** scope.\n       - Optionally, to import Collaborators, select the **read:org** scope.\n       - Press the **Generate token** button.\n       - On the GitLab import page, in the Personal Access Token field, paste the GitHub personal access token.\n6. Press the **Authenticate** button.\n7. Select the items you wish to migrate.\n8. 
Select the projects you wish to migrate and to where.\n9. Press the **Import** button.\n\nYour imported project should now be in your workspace. For additional guidance on migrating from GitHub to GitLab, watch this video:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/0Id5oMl1Kqs?si=HEpZVy94cpfPfAky\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nYou can also perform the migration using a [GitHub personal access token](https://docs.gitlab.com/ee/user/project/import/github.html#use-a-github-personal-access-token) or the [GitLab REST API](https://docs.gitlab.com/ee/user/project/import/github.html#use-the-api). The importer also allows importing from other sources such as Bitbucket or Gitea. To learn more, read the [importer documentation](https://docs.gitlab.com/ee/user/project/import/).\n\n## How to migrate feature-by-feature\n\nLet’s go over how to leverage each feature provided by GitHub Advanced Security in GitLab Ultimate. You must have a [GitLab Ultimate license](https://about.gitlab.com/pricing/ultimate/) to continue. GitLab provides a [free 30-day trial](https://about.gitlab.com/free-trial/devsecops/) to get you started.\n\n### Code scanning\nGitHub provides code scanning to provide contextual vulnerability intelligence and advice for static source code. The same can be done within GitLab by enabling [SAST](https://docs.gitlab.com/ee/user/application_security/sast/). 
GitLab SAST scanners cover a wider set of programming languages and frameworks than GitHub’s [CodeQL](https://docs.github.com/en/code-security/code-scanning/introduction-to-code-scanning/about-code-scanning-with-codeql#about-codeql).\n\nTo enable code scanning in GitLab, you can simply add the [SAST template](https://docs.gitlab.com/ee/user/application_security/sast/#configure-sast-in-your-cicd-yaml) to your `.gitlab-ci.yml`:\n\n```yaml\ninclude:\n  - template: Jobs/SAST.gitlab-ci.yml\n```\n\nOnce the template has been added, any time new code is checked in, SAST will auto-detect the [programming languages](https://docs.gitlab.com/ee/user/application_security/sast/#supported-languages-and-frameworks ) used in your project. It will then scan the source code for known vulnerabilities.\n\n**Note:** Security scanners can also be added to your project using GitLab's [security configuration](https://docs.gitlab.com/ee/user/application_security/configuration/), which can automatically create a merge request to update your pipeline. To learn more, see the [Configure SAST by using the UI documentation](https://docs.gitlab.com/ee/user/application_security/sast/#configure-sast-by-using-the-ui).\n\nSAST results of the diff between the feature-branch and the target-branch display in the merge request widget. 
The merge request widget displays SAST results and resolutions that were introduced by the changes made in the merge request.\n\n![Security scanning in merge request](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674404/Blog/Content%20Images/2-SAST-MR-View.png)\n\nEach vulnerability displays data to assist with remediation, including detailed description, severity, location, and resolution information:\n\n![SAST vulnerability details](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674404/Blog/Content%20Images/3-SAST-MR-View-Detailed.png)\n\nYou can take action on these vulnerabilities:\n\n- **Dismiss vulnerability**: Allows a developer to dismiss the vulnerability with a comment. This assists the security team performing a review.\n- **Create issue**: Allows an issue to be created to keep track of a vulnerability that requires additional oversight.\n\nThese changes can also be seen inline when changing to the **Changes** view within the merge request.\n\n![SAST vulnerability changes view](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674404/Blog/Content%20Images/4-SAST-MR-View-Changes.png)\n\n#### Customizing SAST scanners\n\nGitLab allows you to override a SAST job definition so you can change properties like variables, dependencies, or rules. You can do this by declaring a job with the same name as the SAST job to override. 
Then, place this new job after the template inclusion and specify any additional keys under it.\n\nFor example, the following configuration:\n- overwrites the version the `semgrep-sast` scanner uses\n- runs a script to fetch modules from private projects before running `gosec-sast`\n- configures all scanners to search at a maximum depth of 10\n\n```yaml\ninclude:\n  - template: Jobs/SAST.gitlab-ci.yml\n\nvariables:\n  SEARCH_MAX_DEPTH: 10\n\nsemgrep-sast:\n  variables:\n    SAST_ANALYZER_IMAGE_TAG: \"3.7\"\n\ngosec-sast:\n  before_script:\n    - |\n      cat \u003C\u003CEOF > ~/.netrc\n      machine gitlab.com\n      login $CI_DEPLOY_USER\n      password $CI_DEPLOY_PASSWORD\n      EOF\n```\n\n**Note:** The available SAST jobs can be found in the [`SAST.gitlab-ci.yml` template](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Jobs/SAST.gitlab-ci.yml). Configurations can be found in the [Available SAST CI/CD variables documentation](https://docs.gitlab.com/ee/user/application_security/sast/#available-cicd-variables).\n\n#### Customizing SAST rulesets\n\nFor each SAST analyzer, GitLab processes the code then uses rules to find possible weaknesses in source code. These rules determine what types of weaknesses the scanner reports.\n\n- For Semgrep-based SAST scanners, GitLab creates, maintains, and supports the rules that are used. 
It combines the Semgrep open source engine, GitLab-managed detection rules, and GitLab proprietary technology for vulnerability tracking and false positive detection.\n- For other SAST analyzers, the rules are defined in the upstream projects for each scanner.\n\nYou can customize the behavior of the SAST scanners by defining a ruleset configuration file in the repository being scanned:\n- Disable predefined rules (available for all analyzers)\n- Override predefined rules (available for all analyzers)\n- Replace predefined rules by synthesizing a custom configuration using passthroughs\n\nFor more information and examples on configuring SAST rules, see the [SAST rules](https://docs.gitlab.com/ee/user/application_security/sast/rules.html) and [Customizing rulesets documentation](https://docs.gitlab.com/ee/user/application_security/sast/customize_rulesets.html).\n\n### Secret scanning\n\nGitHub provides secret scanning, which can find, block, and revoke leaked secrets. The same can be done within GitLab by enabling [Secret Detection](https://docs.gitlab.com/ee/user/application_security/secret_detection/).\n\nTo enable Secret Detection in GitLab, you can simply add the following template to your `.gitlab-ci.yml`:\n\n```yaml\ninclude:\n  - template: Jobs/Secret-Detection.gitlab-ci.yml\n```\n\nOnce the template has been added, any time new code is checked in (or a pipeline is run), the secret scanner will scan the source code for known secrets. Pipeline Secret Detection scans different aspects of your code, depending on the situation. For all methods except the “Default branch”, Pipeline Secret Detection scans commits, not the working tree. See the [Secret detection coverage documentation](https://docs.gitlab.com/ee/user/application_security/secret_detection/pipeline/#coverage) to learn more about how secret scanning works.\n\nWhen creating a merge request, Secret Detection scans every commit made on the source branch. 
Just like in SAST, each detected vulnerability provides the following information (such as location) and identifiers to assist with the remediation process:\n\n![Secret Detection vulnerability details](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674404/Blog/Content%20Images/5-Secret-Detection-MR-Detailed.png)\n\nSimilar to SAST, you can take action on these vulnerabilities straight from the merge request, including dismissing vulnerabilities, and creating issues.\n\n#### Customizing Secret Detection jobs\n\nGitLab allows you to override a Secret Detection job definition so you change properties like variables, dependencies, or rules. You can do this by declaring a job with the same name as the Secret Detection job. Then place this new job after the template inclusion and specify any additional keys under it. For example, the following configuration:\n\n- overwrites the stage the secret detection job runs on to `security`\n- enables the historic scanning\n- changes the Secrets Analyzer version to 4.5\n\n```yaml\ninclude:\n  - template: Jobs/Secret-Detection.gitlab-ci.yml\n\nsecret_detection:\n  stage: security\n  variables:\n    SECRET_DETECTION_HISTORIC_SCAN: \"true\"\n    SECRETS_ANALYZER_VERSION: \"4.5\"\n```\n\n**Note:** The available Secret Detection jobs can be found in the [SAST.gitlab-ci.yml template](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Jobs/Secret-Detection.gitlab-ci.yml). Available configurations can be found in the [Available Secret Detection CI/CD variables documentation](https://docs.gitlab.com/ee/user/application_security/secret_detection/pipeline/#customizing-analyzer-settings).\n\n#### Customizing Secret Detection rulesets\n\nThe Secret Detection analyzer allows you to customize which secrets are reported in the GitLab UI. 
The following customization options can be used separately, or in combination:\n\n- disable predefined rules\n- override predefined rules\n- synthesize a custom configuration\n- specify a remote configuration file\n\nFor example, by creating the file `.gitlab/secret-detection-ruleset.toml`, in the root directory of your project, the default GitLeaks package is extended to ignore test tokens from detection:\n\n```yaml\n### extended-gitleaks-config.toml\ntitle = \"extension of gitlab's default gitleaks config\"\n\n[extend]\n### Extends default packaged path\npath = \"/gitleaks.toml\"\n\n[allowlist]\n  description = \"allow list of test tokens to ignore in detection\"\n  regexTarget = \"match\"\n  regexes = [\n    '''glpat-1234567890abcdefghij''',\n  ]\n```\n\nFor more information on overriding the predefined analyzer rules, check out the [Secret Detection documentation](https://docs.gitlab.com/ee/user/application_security/secret_detection/pipeline/#override-predefined-analyzer-rules).\n\n#### Automatic response to leaked secrets\n\nGitLab Secret Detection automatically responds when it finds certain types of leaked secrets. Automatic responses can:\n- automatically revoke the secret\n- notify the partner that issued the secret and the partner can then revoke the secret, notify its owner, or otherwise protect against abuse\n\nGitLab can also notify partners when credentials they issue are leaked in public repositories on GitLab.com. If you operate a cloud or SaaS product and you’re interested in receiving these notifications, you can implement a Partner API, which is called by the GitLab Token Revocation API.\n\nSee the [Automatic response to leaked secrets documentation](https://docs.gitlab.com/ee/user/application_security/secret_detection/automatic_response.html) to learn more.\n\n### Supply chain security\n\nGitHub enables you to secure, manage, and report on software supply chains with automated security and version updates and one-click SBOMs. 
GitLab can meet your supply chain security needs using the Dependency Scanning and Dependency List (SBOM) features.\n\nTo enable Dependency Scanning in GitLab, you can simply add the following template to your `.gitlab-ci.yml`:\n\n```yaml\ninclude:\n  - template: Jobs/Dependency-Scanning.gitlab-ci.yml\n```\n\nOnce the template has been added, any time new code is checked in, Dependency Scanning will auto-detect the [package managers](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/#supported-languages-and-package-managers) used in your project. It will then scan the dependencies used for known vulnerabilities.\n\nDependency Scanning results of the diff between the feature-branch and the target-branch display in the merge request widget. The merge request widget displays Dependency Scanning results and resolutions that were introduced by the changes made in the merge request. Within a merge request, each vulnerability displays relevant information to assist with remediation such as identifiers, evidence, and solutions:\n\n![Dependency Scanner vulnerability details](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674404/Blog/Content%20Images/6-Dependency-Scanner-MR-View-Detailed.png)\n\nSimilar to SAST and Secret Detection, you can take action on these vulnerabilities straight from the merge request, including dismissing vulnerabilities and creating issues.\n\n#### Configuring Dependency Scanning\n\nTo override a job definition (for example, to change properties like variables or dependencies), declare a new job with the same name as the one to override. Place this new job after the template inclusion and specify any additional keys under it. 
For example, the following code:\n\n- disables automatic remediation of vulnerable dependencies\n- requires a build job to complete before Dependency Scanning\n\n```yaml\ninclude:\n  - template: Jobs/Dependency-Scanning.gitlab-ci.yml\n\ngemnasium-dependency_scanning:\n  variables:\n    DS_REMEDIATE: \"false\"\n  dependencies: [\"build\"]\n```\n\nTo learn more about configuring the dependency scanners, see the [Customizing analyzer behavior documentation](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/#customizing-analyzer-behavior).\n\n#### Generating an SBOM\n\nGitLab provides a Dependency List (SBOM) to review your project or group dependencies and key details about those dependencies, including their known vulnerabilities. This list is a collection of dependencies in your project, including existing and new findings. The Dependency List is generated after the dependency scanner runs successfully on the [default branch](https://docs.gitlab.com/ee/user/project/repository/branches/default.html). To access the Dependency List:\n\n1. On the left sidebar, select **Search or go to** and find your project.\n2. Select **Secure > Dependency List**.\n\n![Dependency list (SBOM)](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674404/Blog/Content%20Images/7-Dependency-List.png)\n\nFrom here you can see the following information on your dependencies:\n\n| Field\t| Description |\n| ----- | ----------- |\n| Component\t| The dependency’s name and version. |\n| Packager | The packager used to install the dependency. |\n| Location | For system dependencies, this lists the image that was scanned. For application dependencies, this shows a link to the packager-specific lock file in your project that declared the dependency. It also shows the dependency path to a top-level dependency, if any, and if supported. |\n| License | Links to dependency’s software licenses. 
A warning badge that includes the number of vulnerabilities detected in the dependency. |\n| Projects | Links to the project with the dependency. If multiple projects have the same dependency, the total number of these projects is shown. To go to a project with this dependency, select the Project's number, then search for and select its name. The project search feature is supported only on groups that have up to 600 occurrences in their group hierarchy. |\n\n\u003Cp>\u003C/p>\n\nSee the [Dependency List documentation](https://docs.gitlab.com/ee/user/application_security/dependency_list/) to learn more.\n\n### Security and compliance administration\n\nGitHub Advanced Security allows you to view security metrics and insights and assess code security risk. Now let’s examine how to do the same with GitLab Ultimate.\n\n#### Viewing security metrics and insights\n\nGitLab provides [Security dashboards](https://docs.gitlab.com/ee/user/application_security/security_dashboard/) to help assess the security posture of your applications. These dashboards display a collection of metrics, ratings, and charts for the vulnerabilities detected by the security scanners run on your project:\n\n- vulnerability trends over a 30-, 60-, or 90-day timeframe for all projects in a group\n- a letter grade rating for each project based on vulnerability severity\n- the total number of vulnerabilities detected within the past 365 days, including their severity\n\nTo access the Security dashboard:\n\n1. On the left sidebar, select **Search or go to** and find your project or group.\n2. From the side tab, select **Secure > Security** dashboard.\n3. 
Filter and search for what you need.\n\nThe group view displays your security posture for all projects in your group:\n\n![Group Security dashboard](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674404/Blog/Content%20Images/8-SD-Group.png)\n\nThe project view displays your security posture for just the project:\n\n![Project Security dashboard](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674404/Blog/Content%20Images/9-SD-Project.png)\n\n#### Assess code security risk\n\nGitLab Ultimate features a [Vulnerability Report](https://docs.gitlab.com/ee/user/application_security/vulnerability_report/), which provides information about vulnerabilities from scans of the default branch. It contains cumulative results of all successful jobs, regardless of whether the pipeline was successful. At all levels, the Vulnerability Report contains:\n\n- totals of vulnerabilities per severity level\n- filters for common vulnerability attributes\n- details of each vulnerability, presented in tabular layout\n\n![Vulnerability Report](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674404/Blog/Content%20Images/10-Vulnerability-Report.png)\n\nClicking on a vulnerability enables access to its [Vulnerability Page](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/), which contains details of the vulnerability including a description, location, identifiers, and more. 
Below is an example of the Vulnerability Page for an SQL Injection vulnerability detected by our SAST scanner:\n\n![SQL Injection Vulnerability Page](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674404/Blog/Content%20Images/11-Vulnerability-Page-1.png)\n\nFrom here the security team can collaborate by [changing the status of a vulnerability](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/#change-the-status-of-a-vulnerability) along with a reason and [creating issues to better track changes](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/#create-a-gitlab-issue-for-a-vulnerability).\n\nFrom the Vulnerability Page, you can also leverage [GitLab Duo](https://about.gitlab.com/gitlab-duo/), our AI-powered suite of features, to explain the vulnerability and [automatically create a merge request that resolves the vulnerability](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/#vulnerability-resolution).\nGitLab Duo's [Vulnerability Explanation](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/#vulnerability-explanation) uses a large language model to:\n\n- summarize the vulnerability.\n- help developers and security analysts to understand the vulnerability, how it could be exploited, and how to fix it\n- provide a suggested mitigation\n\n![SQL Injection GitLab Duo AI explanation](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674404/Blog/Content%20Images/13-Explain-Vulnerability.png)\n\n## Additional GitLab Ultimate security features\n\nGitLab Ultimate contains many more security features that cannot be found within GitHub Advanced Security. 
A few examples of these additional security features are: additional security scanners for the complete software development lifecycle (SDLC), granular security guardrails, and custom permissions.\n\n### Security scanners for the entire SDLC\n\nOur portfolio of security scanners extends spans the SDLC.\n\n| Scanner Name | Scans | Languages/Files scanned |\n|  -------------- | ----- | ------------------------- |\n| [Static Application Security Testing (SAST)](https://docs.gitlab.com/ee/user/application_security/sast/) | Static source code | C/C++, Java, Python, Go, JavaScript, C#, and more |\n| [Dynamic Application Security Testing (DAST)](https://docs.gitlab.com/ee/user/application_security/dast/) | Running web application, live API | Language-agnostic |\n| [Infrastructure as Code (IaC) Scanning](https://docs.gitlab.com/ee/user/application_security/iac_scanning/) | IaC files |Terraform, AWS Cloud Formation, Ansible, and more |\n| [Container Scanning](https://docs.gitlab.com/ee/user/application_security/container_scanning/) | Static and running container images | Dockerfile |\n| [Dependency Scanning and License Scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/) | Application dependencies | Requirements.txt, Yarn, Gradle, Npm, and more |\n| [Web API Fuzz Testing](https://docs.gitlab.com/ee/user/application_security/api_fuzzing/) | Sends random/malformed data to web-api | OpenAPI, GraphQL, HAR, Postman Collection |\n| [Coverage-guided Fuzz Testing](https://docs.gitlab.com/ee/user/application_security/coverage_fuzzing/) | Sends random/malformed data to function | C/C++, Go, Swift, Python, Rust, Java, JavaScript, AFL |\n\n\u003Cp>\u003C/p>\n\nGitLab also allows you to integrate [third-party scanners](https://about.gitlab.com/blog/integrate-external-security-scanners-into-your-devsecops-workflow/) and [custom scanners](https://about.gitlab.com/blog/how-to-integrate-custom-security-scanners-into-gitlab/) into the platform. 
Once integrated, the scanner results are automatically presented in various places in GitLab, such as the Pipeline view, merge request widget, and Security dashboard. See the [Security Scanner Integration documentation](https://docs.gitlab.com/ee/development/integrations/secure.html) to learn more.\n\n### Granular security and compliance policies\n\nPolicies in GitLab provide security and compliance teams with [a way to enforce controls globally in their organization](https://about.gitlab.com/blog/meet-regulatory-standards-with-gitlab/). Security teams can ensure:\n\n- security scanners are enforced in development team pipelines with proper configuration\n- all scan jobs execute without any changes or alterations\n- proper approvals are provided on merge requests based on results from those findings\n\n![Merge Request Security Policies](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674404/Blog/Content%20Images/14-MR-Policy.png)\n\nCompliance teams can centrally enforce multiple approvers on all merge requests and ensure various settings are enabled on projects in scope of organizational requirements, such as enabling or locking merge request and repository settings. To learn more see the [GitLab Security Policy](https://docs.gitlab.com/ee/user/application_security/policies/) documentation.\n\n### Custom roles and granular permissions\n\n[GitLab Ultimate provides custom roles](https://about.gitlab.com/blog/how-to-tailor-gitlab-access-with-custom-roles/), which allow an organization to create user roles with the precise privileges and permissions required for that organization’s needs.\n\nFor example, a user could create a “Security Auditor” role with permissions to view security vulnerabilities in the system, but not be able to view source code, nor perform any changes within the repository. 
This granular set of permissions enables well-defined separation of duties.\n\n![Custom role creation](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674404/Blog/Content%20Images/15-Custom-Roles.png)\n\nTo learn more see the [Custom Roles](https://docs.gitlab.com/ee/user/custom_roles.html) and [available Granular Permissions](https://docs.gitlab.com/ee/user/custom_roles/abilities.html) documentation.\n\n### Compliance Center\n\nThe Compliance Center is the central location for compliance teams to manage their compliance standards’ adherence reporting, violations reporting, and compliance frameworks for their group. The Compliance Center includes the following:\n\n- [Compliance standards adherence dashboard](https://docs.gitlab.com/ee/user/compliance/compliance_center/compliance_standards_adherence_dashboard.html) lists the adherence status of projects complying to the GitLab standard.\n- [Compliance violations report](https://docs.gitlab.com/ee/user/compliance/compliance_center/compliance_violations_report.html) shows a high-level view of merge request activity for all projects in the group.\n- [Compliance frameworks report](https://docs.gitlab.com/ee/user/compliance/compliance_center/compliance_frameworks_report.html) shows all the compliance frameworks in a group.\n- [Compliance projects report](https://docs.gitlab.com/ee/user/compliance/compliance_center/compliance_projects_report.html) shows the compliance frameworks that are applied to projects in a group.\n\n![Compliance Center](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674404/Blog/Content%20Images/16-Compliance-Center.png)\n\nThese dashboards assist with making sure separation of duties is being followed to optimize compliance within your organization. 
To learn more see the [Compliance Center documentation](https://docs.gitlab.com/ee/user/compliance/compliance_center/).\n\n## Read more\n\nThis article covers only a portion of the wide range of security features GitLab Ultimate offers. Check out these resources to learn more about how GitLab Ultimate can help enhance your organizational security and developer efficiency:\n\n- [Why GitLab Ultimate](https://about.gitlab.com/pricing/ultimate/)\n- [Getting Started with DevSecOps Tutorial](https://gitlab-da.gitlab.io/tutorials/security-and-governance/devsecops/simply-vulnerable-notes/)\n- [Getting Started with DevSecOps Sample Project](https://gitlab.com/gitlab-da/tutorials/security-and-governance/devsecops/simply-vulnerable-notes)\n- [Import your project from GitHub to GitLab documentation](https://docs.gitlab.com/ee/user/project/import/github.html)\n- [Migrating from GitHub Actions documentation](https://docs.gitlab.com/ee/ci/migration/github_actions.html)\n- [Tutorial: Create and run your first GitLab CI/CD pipeline](https://docs.gitlab.com/ee/ci/quick_start/)\n- [Tutorial: Create a complex pipeline](https://docs.gitlab.com/ee/ci/quick_start/tutorial.html)\n- [CI/CD YAML syntax reference](https://docs.gitlab.com/ee/ci/yaml/)",[765,9,681,479,766],"tutorial","testing",{"slug":768,"featured":91,"template":685},"migration-guide-github-advanced-security-to-gitlab-ultimate","content:en-us:blog:migration-guide-github-advanced-security-to-gitlab-ultimate.yml","Migration Guide Github Advanced Security To Gitlab 
Ultimate","en-us/blog/migration-guide-github-advanced-security-to-gitlab-ultimate.yml","en-us/blog/migration-guide-github-advanced-security-to-gitlab-ultimate",{"_path":774,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":775,"content":781,"config":787,"_id":789,"_type":14,"title":790,"_source":16,"_file":791,"_stem":792,"_extension":19},"/en-us/blog/multi-cloud-security",{"title":776,"description":777,"ogTitle":776,"ogDescription":777,"noIndex":6,"ogImage":778,"ogUrl":779,"ogSiteName":670,"ogType":671,"canonicalUrls":779,"schema":780},"A brief guide to multicloud security","Five challenges and seven best practices to consider for your multicloud strategy.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679136/Blog/Hero%20Images/multi-cloud-security.jpg","https://about.gitlab.com/blog/multi-cloud-security","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A brief guide to multicloud security\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vanessa Wegner\"}],\n        \"datePublished\": \"2019-11-21\",\n      }",{"title":776,"description":777,"authors":782,"heroImage":778,"date":783,"body":784,"category":678,"tags":785},[675],"2019-11-21","\nMany agree that multicloud is worth the risk.\n\nThe multicloud trend has taken hold in recent years, with [RightScale finding\nthat 84% of enterprises run a multicloud strategy](https://www.flexera.com/blog/cloud/2019/02/cloud-computing-trends-2019-state-of-the-cloud-survey/). With multicloud,\norganizations deploy applications across two or more cloud platforms, like\nAWS, Azure, or Google Cloud.\n\nIncreased flexibility is one of the biggest appeals of a [multicloud strategy](/topics/multicloud/).\nCompanies avoid vendor lock-in by deploying workloads to different cloud platforms\nbased on cost and application needs. 
Hyperscale cloud vendors have data centers\nacross the globe, so organizations are able to control their cloud expenditures\nby scheduling workloads based on location and local time. Multicloud also\nprotects business operations by reducing down time, and improving resilience in\nthe event of an outage or workload-disruptive breach (like a DDoS attack).\n\nHowever, multicloud still has drawbacks that require careful consideration.\nThe increased complexity of a multicloud environment exponentially increases\nan organization’s attack surface and level of risk. Most of these risks can be\nmitigated with a thorough assessment and strategy addressing security needs –\nand as [a study from IDG and IBM has found](https://www.ciosummits.com/Online_Assets_IBM_Whitepaper_-_Multi-cloud_Organizations_Confront_IT_Security_Challenges.pdf),\n70% of survey respondents agreed that the benefits of multicloud outweigh the risks.\n\nThat being said, there’s a lot to consider. In this blog, we’ll run through\nsome of the top security challenges of multicloud, and dig into the strategies\nto conquer them. If you're short on time, feel free to skip down to the best\npractices section.\n\n### Key security challenges and how to manage them\n\n#### Access and permissioning\n\nMulticloud adds complexity to your identity and access management efforts.\nEmployees need access to multiple cloud services as part of their daily work,\nand will access your data from a multitude of locations and devices. We\nrecommend you take a Zero Trust approach here: Allow access on an as-needed\nbasis, and no more. 
Data classification levels can help you streamline access\ndeterminations across different clouds, but the key idea is that limited access\nwill both protect your most mission critical and sensitive information, and\nallow you a clear view of when (and by whom) that information is accessed.\n\n#### Staying up to date\n\nWhile this is a security concern for any cloud use, upgrades and patching in\nmulticloud are more challenging because the vulnerabilities and mitigations\nfrom each cloud service provider are different. Multicloud complexity also\nmakes it difficult to keep track of vulnerabilities as applications communicate\nacross multiple clouds. [Mike Bursell from RedHat\ncalls this need “workload freshness”](https://enterprisersproject.com/article/2019/10/multi-cloud-security-issues-watch) – and suggests that this might require you\nto upgrade or patch in place, restart the workload with the latest image, or\ncheck and reload recent dependencies, in order to maintain the most recent\nversions of any dependent libraries, middleware, or executables.\n\n#### A disjointed view of security\n\nMost cloud vendors offer native tools to help you manage security within their\ncloud platform, and most of those tools can’t be applied to other vendors. This\ndisjointed approach to monitoring makes it difficult to gain a thorough\nunderstanding of all the vulnerabilities present in your infrastructure.\n\nInstead of making piecemeal security sense, adopt a multicloud management tool\nthat serves as a single pane of glass into all the happenings across all of your\ncloud platforms. Bursell notes that any monitoring tool needs to be fully aware\nof the scope of your deployment. It’s also important to have regular, if not\nreal-time, updates to your data view so that you’re aware of unusual changes or\nactivities and can address attacks as they come in. 
A centralized tool is also\nvaluable for conducting forensic analysis of your systems in the event of a\nlate-discovered breach.\n\n#### Control plane complexity\n\nRedHat’s Bursell defines the control plane as any communication which controls\nyour applications or how they are run. In addition to securing communications\nbetween and within applications, all scheduling, monitoring, and routing\ncommunications should also be encrypted. It’s critical to secure the\nadministration, logging, and audit functionality of your applications\n(lest you want to give hackers the opportunity to take down your entire\ninfrastructure). [David Locke of World Wide Technology writes\nthat security functionality and enforcement needs to be uniform within all of\nyour cloud environments](https://www.datacenterdynamics.com/opinions/security-challenges-multicloud-evolution/), allowing those functions to communicate and coordinate\nbetween themselves and support security automation.\n\n#### Application hardening\n\nWhen hardening your infrastructure, Bursell recommends knowing what APIs are\nexposed, understanding what controls you have on them, and planning what\nmitigations you can apply if they come under attack. [Tripwire notes that\nany software that your organization develops or acquires](https://www.tripwire.com/state-of-security/security-data-protection/cloud/multi-cloud-security-best-practices-guide/) from a third party must\nbe patched and security hardened by your organization.\n\n### Best practices\n\nNeed a TL;DR? 
We’ve got you covered:\n\n**Key security capabilities and strategies:** Multi-factor authentication,\ncloud workload security, security analytics, encryption, identity and access\nmanagement, cloud security gateways, microsegmentation, threat modeling,\nthreat intelligence, and endpoint detection and response.\n\n**Keep things consistent:** Develop a set of security policies and procedures\nto enforce on all of your clouds (and any on-prem software too, for that matter).\nWhile there will almost always be some kind of incompatibility, a benchmark or\nstandardized security policy will reduce the risk of oversights.\n\n**Cloud agnostic software:** Use security tools that can easily integrate with\nany cloud service, and that can scale with increased apps and workloads.\n\n**Go beyond your CSP’s tools:** Your cloud providers have tools to keep their\nofferings safe, but protection of the data itself falls to you. Some vendors\nmay be able to advise which capabilities you need within their infrastructure\nto keep your data safe.\n\n**Confidential computing:** Data protection usually focuses on data at rest and\nin transit, but what about data in use? Protect data as it is being processed,\nand always know _where_ the data is being used. Confidential computing will\nallow encrypted data to be processed in memory without exposing it to the rest\nof the system. This is a relatively new area, so consider keeping tabs on\nthe [Confidential Computing Consortium](https://confidentialcomputing.io/) to\nstay in the loop.\n\n**Anticipate unforeseen changes:** Planning for the unknown seems like an\noxymoron – but in tech, it’s not. Things change constantly, and often in ways\nwe don’t predict. 
Make sure your systems and environments can adapt to whatever\nthe market throws at you.\n\n**Stay informed of new computing trends:** For instance, [Nick Ismail from\nInformation Age highlights that serverless computing adoption is growing](https://www.information-age.com/business-multicloud-strategy-123471227/) as it allows cloud\ninstances to be scaled and patched instantly, and machine learning will be able\nto help servers identify patterns of malicious behavior and respond faster than\nhuman administrators can respond.\n\n## Looking ahead\n\nJust like every market, cloud will continue to change as vendors make new\nalliances and focus on new capabilities. In 2020, [Forrester predicts](https://go.forrester.com/blogs/predictions-2020-cloud/)\nthat hyperscale global public cloud leaders will form more alliances, while\ncloud management vendors will shift their focus to security – after a\nhigh-visibility data breach. Take steps to ensure that that breach isn’t yours\nby assessing the current and future state of your cloud strategy, and infusing\nsecurity into everything you do.\n\nCover image by [Michael Weidner](https://unsplash.com/@michaelbweidner) on [Unsplash](https://unsplash.com/photos/h-rP5KSC2W0).\n{: .note}\n",[681,9,786],"cloud native",{"slug":788,"featured":6,"template":685},"multi-cloud-security","content:en-us:blog:multi-cloud-security.yml","Multi Cloud Security","en-us/blog/multi-cloud-security.yml","en-us/blog/multi-cloud-security",{"_path":794,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":795,"content":801,"config":806,"_id":808,"_type":14,"title":809,"_source":16,"_file":810,"_stem":811,"_extension":19},"/en-us/blog/questions-regarding-our-zero-trust-efforts",{"title":796,"description":797,"ogTitle":796,"ogDescription":797,"noIndex":6,"ogImage":798,"ogUrl":799,"ogSiteName":670,"ogType":671,"canonicalUrls":799,"schema":800},"We answer your most popular questions about our Zero Trust journey","From why we chose Okta to issues around data 
fluidity, here are answers to your most-asked ZT questions.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681098/Blog/Hero%20Images/lysander-yuen-wk-ztn-unsplash.jpg","https://about.gitlab.com/blog/questions-regarding-our-zero-trust-efforts","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"We answer your most popular questions about our Zero Trust journey\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mark Loveless\"}],\n        \"datePublished\": \"2020-02-19\",\n      }",{"title":796,"description":797,"authors":802,"heroImage":798,"date":803,"body":804,"category":681,"tags":805},[719],"2020-02-19","\n\n_It’s been a busy few months since my last blog post on our Zero Trust efforts, [\"Zero Trust at GitLab: Where do we go from here?\"](/blog/zero-trust-at-gitlab-where-do-we-go-from-here/). Since then I’ve done a few [press interviews](https://www.digi.no/artikler/zero-trust-du-ikke-kan-basere-deg-bare-pa-en-leverandor-for-a-lose-det/484170) and spoken at security conferences (most recently at [ShmooCon 2020](https://www.shmoocon.org/speakers/#0trust)) on the topic of Zero Trust. I’ve been transparent about GitLab’s implementation of security and our pursuit of Zero Trust ideas. I received many questions about Zero Trust at ShmooCon, both at the end of the talk and in the hallways after. I thought I’d pass on a few of those questions with some answers since many people are interested in the actual implementation of the ideas. 
It’s also a good way to show what happens when a well-meaning concept meets harsh reality._\n\nWarning: Video contains some strong language\n{: .note}\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/vI7_M04qpJ4\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\nI discussed data classification challenges and specifically the fluidity of data in my ShmooCon talk and was met with a lot of hallway questions. More than one person asked for an example and wanted my opinion on how to classify the data. Hence this far-reaching question:\n\n### What do you mean that there are issues with the \"fluidity\" of data when it comes to data classification?\n\nIn an earlier blog post I did give an example of the fluidity of data, specifically when I talked about the movement of data in a section called (appropriately) [\"Movement of data\"](/blog/zero-trust-at-gitlab-the-data-classification-and-infrastructure-challenge/). Data fluidity is an issue because access to data is usually defined and enforced at the time of authentication. Waiting until the authentication stage causes problems if the data stays stationary while its classification changes rapidly. If you authenticate in the morning and have access to YELLOW data, but that data’s classification changes to RED in the afternoon, how do we enforce access controls if the user is only allowed access to YELLOW data? It is possible that you could move the data to a location intended for RED access only, or assign a user some type of access token specific to data based upon that data’s classification. But, you will need a solution that will scale and will additionally need to worry about whether data is being cloned, copied or archived. 
This is a question where it is much easier to explain the issue than solve it, and we are still looking for answers.\n\nThe concept of zones is a stop-gap until a better solution can be implemented. A data zone is comprised of data of different classifications, but with a single allow/deny method of access control. In other words, a data zone that contains both ORANGE and YELLOW data is ranked as an ORANGE ZONE since that is the highest level of data contained within it. Since we cannot specify granular access to the ORANGE ZONE resource, someone with YELLOW access cannot access the YELLOW data inside the ORANGE ZONE. The goal is to eliminate the zones so that we can define granular access to data. Most of the zones are set up to accommodate legacy systems into the data classification scheme and need to eventually be eliminated. This is, of course, a common problem in information technology – how do we move off of old systems onto new systems without disrupting existing processes and procedures. GitLab is very fortunate in that we have very few data zones compared to most companies, but it is always a problem when we encounter them.\n\nThe more advanced problem is that most technologies assign authorization to access data based upon the moment of initial user authentication. We want to eliminate data zones and we want to eliminate complexity. Making copies of data and storing a YELLOW version in one place and a RED version in another complicates things. Using an automated process that allows a non-privileged user to see privileged data also complicates things. The good news is that we are far enough in the Zero Trust process that we are dealing with this challenge. The bad news is we don’t have an answer yet but we’re still searching for something that works.\n\n\nWe get questions about our choice of vendors, mainly our choice of Okta as a major vendor for Zero Trust. 
Most organizations find it difficult to accept an approach where there is little or no competition in certain arenas, and in hallway conversations, people seemed alarmed that they’d be putting all of their digital eggs into a single digital basket. Some people have asked for an explanation as to why we are putting all of our end user identity in one basket:\n\n### You’re using Okta, what other tools did you look at? What didn’t meet your criteria?\n\nWe were looking for an [identity-management system](https://en.wikipedia.org/wiki/Identity-management_system) (IMS) that allows us to positively identify users during the authentication process. The IMS needed to have multi-factor authentication (MFA) capability and be able to support a lot of SaaS products. Okta gave us this and had a lot more features we’ve since started using. We also looked at products that mainly did MFA, but it was meeting those critical items along with a lot of extras we could take advantage of that clinched it.\n\nThe flexibility of Okta and the ability to implement something more than one way based upon user need was an unexpected benefit – MFA is an example. Some of our team members agreed to use U2F in the form of Yubikeys. This worked great, although some team members expressed concerns about possibly losing the keys or worried about the risk of leaving a low profile Yubikey plugged in all the time in case the entire laptop was lost or stolen. Since Okta’s MFA solutions also included the Okta Verify phone app that supported \"push\" technology, we could allow team members to have a choice in MFA methods. Team members could use the Yubikey or the push technology based upon what best suited their workflow, and we were able to get MFA implemented with team members actually using it. 
Allowing us to give team members a choice instead of simply forcing a method upon them leads to a happier adoption process, quicker overall implementation, and of course, a more secure work environment.\n\nMost vendors don’t offer the level of flexibility Okta does with their products or allow for that level of granularity with features when it comes to identity management, so there really were not a lot of other choices. Add in support for provisioning and de-provisioning for dozens of SaaS applications and it was obvious we’d get a great ROI.\n\n### How do you separate the hype from the fact when looking at Zero Trust?\n\nFirst off, for our implementation, we just identified what we wanted out of a security system that granted access to users, systems, and data. You can’t just say \"we want Zero Trust\" because every vendor claims to sell Zero Trust solutions. We used the [BeyondCorp paper](https://cloud.google.com/beyondcorp/) as an example of Google doing something for themselves, and not as a blueprint for us. We just looked for products that met our \"must-have\" list, and if it had a lot of \"nice to haves\" available that was great. It was even better if it had useful features we hadn’t even considered. So we ended up with Okta as a cornerstone for user identity and authentication, and now all products need to speak Okta, or at least support the protocols that Okta supports. That makes it easy, or at least easier to make things work together if we define a common bit of criteria - every solution must tie into Okta.\n\nThe hard part is that user identity and authentication is only one part of the picture. We need to do end-user device identity and authentication. We need to assign identity to running processes, including those kicked off by users, and those fully automated and triggered by events. And, getting into non-Zero Trust territory but still very much in line with our goals, we want to be able to audit all of our controls. 
We want to be able to log everything and search those logs for anomalies. Therefore we have to make sure that any Zero Trust solution can support auditing and logging.\n\n\n**What do you want to know? Do you have your own questions? Let us know!**\nWe’re still moving forward as our Zero Trust implementation is a work in progress. As we hit milestones, we will continue to update you with new blogs with hopefully new solutions and processes that work. Right now we’re deploying a solution for SSH by using Okta ASA, and we’re still tackling our asset management, so expect news from those fronts in upcoming blog posts!\n\n\n\nCover image by [Lysander Yuen](https://unsplash.com/@lysanderyuen?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://www.unsplash.com).\n{: .note}\n",[723,681,9],{"slug":807,"featured":6,"template":685},"questions-regarding-our-zero-trust-efforts","content:en-us:blog:questions-regarding-our-zero-trust-efforts.yml","Questions Regarding Our Zero Trust Efforts","en-us/blog/questions-regarding-our-zero-trust-efforts.yml","en-us/blog/questions-regarding-our-zero-trust-efforts",{"_path":813,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":814,"content":820,"config":827,"_id":829,"_type":14,"title":830,"_source":16,"_file":831,"_stem":832,"_extension":19},"/en-us/blog/rise-of-protestware",{"title":815,"description":816,"ogTitle":815,"ogDescription":816,"noIndex":6,"ogImage":817,"ogUrl":818,"ogSiteName":670,"ogType":671,"canonicalUrls":818,"schema":819},"Protestware threats: How to protect your software supply chain","Some people protest for change by changing code others depend on throughout the software supply chain. 
Learn more about protestware, its impact, and how to protect against it.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669673/Blog/Hero%20Images/engineering.png","https://about.gitlab.com/blog/rise-of-protestware","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Protestware threats: How to protect your software supply chain\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Abubakar Siddiq Ango\"}],\n        \"datePublished\": \"2023-05-09\",\n      }",{"title":815,"description":816,"authors":821,"heroImage":817,"date":823,"body":824,"category":825,"tags":826},[822],"Abubakar Siddiq Ango","2023-05-09","\n\nIn 2016, the continuous integration (CI) pipelines of millions of projects failed because a developer decided to pull their projects from npm package registry in [protest of a request to take down or rename one of their packages](https://www.theregister.com/2016/03/23/npm_left_pad_chaos/). In January 2022, the maintainer of the widely used 'colors' and 'faker' packages on the npm registry modified [these projects](https://blog.sonatype.com/npm-libraries-colors-and-faker-sabotaged-in-protest-by-their-maintainer-what-to-do-now?hsLang=en-us), adding malicious code that infinitely printed gibberish in protest of corporations who use open source projects without giving back. These are two examples of \"protestware,\" a term that refers to software packages or applications that have been intentionally modified to send a political message. The impacts may range from seeing unexpected messages in a terminal or logs when building an application to serious adverse impacts like data deletion. \n\nWhile protestware remained rare for a long time, recent high-profile incidents have brought it back into the spotlight. 
Similar code injection variants like [typosquatting](https://www.kaspersky.com/resource-center/definitions/what-is-typosquatting) packages (as in the case of the [colors npm](https://www.mend.io/resources/blog/new-typosquating-attack-on-npm-package-colors-using-cross-language-technique-explained/) package, where bad actors created compromised clones of packages with similar names) and compromised packages (as in the case of the [ctx PyPI packages](https://www.theregister.com/2022/05/24/pypi_ctx_package_compromised/)) are usually perpetrated by bad actors looking to cause harm. Protestware is unusual in that the custodians of projects trusted by the community have allowed or made these changes. Regardless of whether the changes' impacts are harmful, such changes raise ethical concerns and can create unwanted distractions. These risks also reinforce the need for open source consumers to adopt a [zero trust security model](/blog/why-devops-and-zero-trust-go-together/) for their software supply chain. Trust, but verify!\n\nThe world is going through unprecedented movements demanding change, and change seekers will find new and often disruptive ways to be heard, as we have seen in the case of everything from climate activism to TikTok challenges. Software supply chains are not exempt and, as we have learned from past incidents, being proactive is key to staying secure.\n\nHere are some steps you can take to protect your software supply chain by ensuring your dependencies are secure.\n\n## Implement dependency scanning\n\n[Dependency scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/) is now an industry standard, and there is no shortage of tools or libraries to scan your packages, containers, or any other binary formats for vulnerabilities. 
Using GitLab CI’s [`rules:exists`](https://docs.gitlab.com/ee/ci/yaml/#rulesexists) rule, GitLab checks for the presence of certain files to determine the appropriate scans to check for vulnerabilities. Coupled with [Vulnerability Reports](https://docs.gitlab.com/ee/user/application_security/vulnerability_report/), [Policy Management](https://docs.gitlab.com/ee/user/application_security/policies/index.html#policy-management), and the [Security Dashboard](https://docs.gitlab.com/ee/user/application_security/security_dashboard/index.html), your security team and organization can stay ahead of vulnerabilities. To include dependency scanning in your CI pipeline, add the following lines to your `.gitlab-ci.yml` file. You can explore the [Dependency Scanning CI template](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Jobs/Dependency-Scanning.gitlab-ci.yml) to understand how it works. \n\n```\n\ninclude:\n\n  template: Jobs/Dependency-Scanning.gitlab-ci.yml\n\n```\n\nRunning the CI script against an example [Ruby on Rails project](https://gitlab.com/gitlab-de/playground/ruby-rails-demo) with Ruby 3.0.4, the [Vulnerability Report](https://gitlab.com/gitlab-de/playground/ruby-rails-demo/-/security/vulnerability_report/?scanner=GitLab.DEPENDENCY_SCANNING) shows more than 70 vulnerabilities detected for the dependencies in the project’s [Gemfile](https://gitlab.com/gitlab-de/playground/ruby-rails-demo/-/blob/master/Gemfile).\n\n\n![Vulnerability Report Image](https://about.gitlab.com/images/blogimages/2023-04-rise-of-protestware/vulnerability-report.png \"Vulnerability Report Image\")\n\n\n## Generate provenance validations\n\nUsers of packages can verify they are not downloading a compromised version using [artifact attestation](https://docs.gitlab.com/ee/ci/runners/configure_runners.html#artifact-attestation), which was [introduced in GitLab Runner 15.1](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/28940/). 
Attestation metadata is generated in the [in-toto format](https://github.com/in-toto/attestation); it provides [provenance](https://slsa.dev/provenance/v0.2) attesting to how a binary was built, and you can verify the artifacts against the provenance. This allows you to achieve [Level 2](/blog/achieve-slsa-level-2-compliance-with-gitlab/) of the Supply-chain Levels for Software Artifacts ([SLSA](https://slsa.dev/)) security framework. \n\nThe demo video below shows how to configure your CI script to generate artifact attestation metadata.\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/MlIdqrDgI8U\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\n## Utilize private registries\n\n[Self-hosting registries](https://docs.gitlab.com/ee/user/packages/) for packages, container images, or your Terraform modules are a more secure way of ensuring secure and vetted packages are used by your team. Security and compliance teams are enabled to ensure total control of the dependencies used in the entire organization and how they are accessed with [package registry permissions](https://docs.gitlab.com/ee/user/packages/package_registry/index.html#package-registry-visibility-permissions). GitLab supports container, infrastructure, and package registries. Package registries supported include Composer (PHP), Conan (C/C++), Generic, Maven (Java), npm (NodeJS), NuGet (Windows packaging), PyPI (Python), and RubyGems (Ruby).\n\n## Enable Dependency Proxy\nThe [Dependency Proxy](https://docs.gitlab.com/ee/user/packages/dependency_proxy/index.html) reduces the number of requests made to upstream dependency registries by acting as a local proxy. This reduces the impact of changes or vulnerabilities in the upstream packages, as a clean version will still be stored in the Dependency Proxy’s cache. 
This offers faster build times, since the cache is most likely closer to the build system that needs the image, and it ensures continuity when an upstream registry is having downtime or enforcing rate limits — as in the case of [Docker Hub](https://docs.docker.com/docker-hub/download-rate-limit/), which has a limit of 100 container image pulls per 6 hours per IP address container image for anynomous users as of the time of writing this article.\n\nYou can enable Dependency Proxy in the Packages and Registries section of a group’s settings. Only an administrator can enable/disable the Dependency Proxy for a GitLab instance. \n\n![Dependency Proxy setting image](https://about.gitlab.com/images/blogimages/2023-04-rise-of-protestware/dependency-proxy.png \"Dependency Proxy Setting Image\")\n\n\nTo use the Dependency Proxy in your CI script, you can use the `CI_DEPENDENCY_PROXY_GROUP_IMAGE_PREFIX` predefined variable as shown below:\n\n```\n\n# .gitlab-ci.yml\n\nimage: ${CI_DEPENDENCY_PROXY_GROUP_IMAGE_PREFIX}/ubuntu:latest\n\n```\n\nThe GitLab Runner automatically authenticates with the Dependency Proxy, but if your use case requires manual authentication, like building container images, you can use other predefined CI/CD variables as detailed in the [documentation](https://docs.gitlab.com/ee/user/packages/dependency_proxy/index.html).\n\nGitLab is also working on leveraging the Dependency Proxy to give more control to security teams with the [Dependency Firewall](https://about.gitlab.com/direction/package/#dependency-firewall), which will allow for control of how upstream packages are used and how they impact the organization. 
Package validation and version management can be managed from a central location without impacting the workflow of users.\n\nProactively instrumenting your software development lifecycle to ensure continuous review of your application along with controls is critical to keeping your software supply chain secure and preventing production problems due to protestware.\n","devsecops",[744,9,681,479],{"slug":828,"featured":6,"template":685},"rise-of-protestware","content:en-us:blog:rise-of-protestware.yml","Rise Of Protestware","en-us/blog/rise-of-protestware.yml","en-us/blog/rise-of-protestware",{"_path":834,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":835,"content":841,"config":847,"_id":849,"_type":14,"title":850,"_source":16,"_file":851,"_stem":852,"_extension":19},"/en-us/blog/the-ultimate-guide-to-least-privilege-access-with-gitlab",{"title":836,"description":837,"ogTitle":836,"ogDescription":837,"noIndex":6,"ogImage":838,"ogUrl":839,"ogSiteName":670,"ogType":671,"canonicalUrls":839,"schema":840},"The ultimate guide to least privilege access with GitLab","This tutorial demonstrates how to achieve least privilege access using custom roles, security policies, compliance pipelines, branch protections, and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099438/Blog/Hero%20Images/Blog/Hero%20Images/built-in-security_built-in-security.jpeg_1750099438377.jpg","https://about.gitlab.com/blog/the-ultimate-guide-to-least-privilege-access-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The ultimate guide to least privilege access with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2024-03-06\",\n      }",{"title":836,"description":837,"authors":842,"heroImage":838,"date":843,"body":844,"category":681,"tags":845},[761],"2024-03-06","The principle of least privilege 
([PoLP](https://csrc.nist.gov/glossary/term/least_privilege)) is a concept in which a user's access rights should be limited to the bare minimum needed for them to complete the tasks required within their respective roles. By implementing PoLP you can enhance your organization's [security posture](https://csrc.nist.gov/glossary/term/security_posture), complementing zero trust, in the following ways:\n\n- **Reduction of attack surface:** If credentials are compromised, the breach will be limited to only the paths where the compromised account has access.\n- **Protection against human error:** Users will not be able to perform actions that are not required for their role.\n- **Adherence to compliance:** Separation of duties and least privilege best practices are required for several compliance mandates such as SOC2 and HIPAA.\n- **Reduced system downtime:** By preventing everyone from accessing critical parts of the software development lifecycle (SDLC), there is less likelihood of downtime.\n\nGitLab provides a variety of different features that allow you to customize the actions a user can perform which assist in the achievement of PoLP. These features include:\n\n- **[Custom roles and granular security permissions](#custom-roles-and-granular-security-permissions):** Allows creation of roles with permissions that are specific to particular functions required by the organization.\n- **[Security policies](#security-policies):** Allows policies to be created that prevent insecure code from being merged into production branches without approval, and run security scanners regardless of your pipeline definition.\n- **[Branch protections and Code Owners](#branch-protections-and-code-owners):** Imposes further restrictions on certain branches to control permissions such as who can merge, push, etc. 
to defined branches.\n- **[Compliance pipelines and frameworks](#compliance-pipelines-and-frameworks):** Identifies that your project has certain compliance requirements or needs additional oversight, enforcing a pipeline configuration to the projects on which it is applied.\n\nIn this blog post, you'll learn each of the features mentioned, how they improve your organization's security posture, as well as how to implement them.\n\nWatch my video, which introduces you to achieving PoLP with GitLab:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/jvZ3eqWMeSY?si=DedSYiBNy2kTLJKo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Custom roles and granular security permissions\n\nGitLab allows you to create [custom roles](https://docs.gitlab.com/ee/user/custom_roles.html), which apply additional permissions to base roles to meet the security needs of your organization. The available [base roles](https://docs.gitlab.com/ee/user/permissions.html#roles) are as follows:\n\n- Guest\n- Reporter\n- Developer\n- Maintainer\n- Owner\n\nEach base role applies a particular set of permissions to a user. Base roles apply different permissions for [group members](https://docs.gitlab.com/ee/user/permissions.html#group-members-permissions), [project members](https://docs.gitlab.com/ee/user/permissions.html#project-members-permissions), and in [project features](https://docs.gitlab.com/ee/user/permissions.html#project-features-permissions). 
For example, the table below shows which roles can view the project [dependency list](https://docs.gitlab.com/ee/user/application_security/dependency_list/):\n\n| Base role    | Can view project dependency list     |\n| ---------- | ---------- |\n| Guest      | ❌       |\n| Reporter      | ❌       |\n| Developer      | ✅       |\n| Maintainer      | ✅       |\n| Owner       | ✅     |\n\n\u003Cbr>\u003C/br>\nThe dependency list also known as a software bill of materials ([SBOM](https://about.gitlab.com/blog/the-ultimate-guide-to-sboms/)), displays your project's dependencies\nand key details about those dependencies. It makes sense that only those actively working on a project should\nbe able to see what dependencies are present to limit any exploitation of your application using its dependencies.\n\nHowever, there are cases in which a Guest may need to see the SBOM to assist the organization in\n[achieving compliance](https://www.whitehouse.gov/briefing-room/presidential-actions/2021/05/12/executive-order-on-improving-the-nations-cybersecurity/). By using custom roles, a new role can be created with all the limited permissions of the Guest role, and additionally, the ability to view the project dependency list can be added. 
Therefore, we have a Guest assisting us with compliance with the least privileged access required for their job.\n\nWatch my video on custom roles and granular security permissions with GitLab:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/WyrhkpO5WkI?si=4B4mNYNK9UyNrru8\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Granular permissions\n\nAs of the GitLab 16.8 release, the following granular permissions can be added to any base role:\n\n- Viewing project code\n- Viewing vulnerability reports\n- Changing the status of vulnerabilities\n- Viewing SBOMs\n- Approving merge requests\n- Managing project/group access tokens\n- Adding/removing group members\n- Archiving/unarchiving/removing projects\n- Admin Terraform state\n\nWe will continue to add [more granular permissions](https://docs.gitlab.com/ee/user/custom_roles/abilities.html) with each GitLab release. You can learn more about our roadmap for this feature by referring to the [Granular Security Permissions Epic](https://gitlab.com/groups/gitlab-org/-/epics/10684) and provide feedback in the [customer feedback Issue](https://gitlab.com/gitlab-org/gitlab/-/issues/391760). 
You also have the ability to contribute to GitLab and [develop your own granular permissions](https://docs.gitlab.com/ee/development/permissions/custom_roles.html).\n\n### Implementation prerequisites\nThe requirements for implementing custom roles are as follows: \n- Owner role in the top-level group in which you are creating the custom role\n- Administrator for the self-managed instance in which you are creating the custom role\n- GitLab Ultimate tier in the top-level group\n- A [personal access token with the API scope](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html#create-a-personal-access-token)\n\nTo see custom roles in action requires:\n- a private project within the top-level group or its subgroups\n- a guest user within the private project\n\nWhen you enable a custom role for a user with the Guest role, that user has access to elevated permissions, and therefore:\n- is considered a billable user on self-managed GitLab\n- uses a seat on GitLab.com\n\n### Creating the custom role with granular permissions\n\nNow that you know the benefits of implementing custom roles with granular permissions, let's implement them within our GitLab instance:\n\n1. On the left sidebar, select **Search or go to**.\n    - In GitLab SaaS find and select the top-level group in which you want to create a custom role.\n    - In GitLab Self-Managed find and select **Admin Area**.\n2. Select **Settings > Roles and Permissions**.\n    - In GitLab Self-Managed use the top dropdown list to find and select the top-level group in which you want to create a custom role.\n3. Select **Add new role**.\n4. Under Base role to use as a template, select **Guest** for this tutorial.\n5. Under Role name, enter the custom role’s title.\n6. Under Permissions for the custom role, select **Read Vulnerability** for this tutorial.\n7. 
Select **Create a new role**.\n\n![Create new role screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750099455072.png)\n\n\u003Ccenter>\u003Ci>Interface for creating a custom role\u003C/i>\u003C/center>\n\u003Cp>\u003C/p>\n\nAfter creating the role you should be able to see the new custom role along with its ID, Base role, and Permissions. Be sure to save the ID as it will be used when we assign the custom role to a guest user.\n\n![Custom role screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750099455073.png)\n\n\u003Ccenter>\u003Ci>Security Auditor role created\u003C/i>\u003C/center>\n\u003Cp>\u003C/p>\n\nNow we must assign the custom role to a group or project member. This can be done as follows:\n1. Invite a user as a direct member with the Guest role to your top-level group where the custom role was created.\n2. You can invite them to a sub-group or private project within the top-level group as well.\n* The guest user should not be able to see any code within the project they have been assigned to.\n* Open your terminal.\n3. Export the required environment variables:\n* Your [personal access token with API scope](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html#create-a-personal-access-token)\n\n```\n$ export TOKEN=glpat-XXXXXXXXXXXX\n$ echo $TOKEN\nglpat-XXXXXXXXXXXX\n\n```\n\n* The ID of the user we will be granting a custom role to. You can obtain the user id by providing the username to the [User API](https://docs.gitlab.com/ee/api/users.html#list-users). 
For more information on using the GitLab API, see the [REST API documentation](https://docs.gitlab.com/ee/api/rest/).\n\n```\n$ curl \"https://gitlab.example.com/api/v4/users?username=fjdiaz\"\n[{\"id\":4710074,\"username\":\"fjdiaz\",\"name\":\"Fern\",\"state\":\"active\",\"locked\":false,\"avatar_url\":\"https://gitlab.com/uploads/-/system/user/avatar/4710074/avatar.png\",\"web_url\":\"https://gitlab.com/fjdiaz\"}]\n\n$ export USER_ID=4710074\n$ echo $USER_ID\n4710074\n```\n\n* The ID of the custom role. You can obtain the custom role ID from the ID column in the [custom roles UI](https://docs.gitlab.com/ee/user/custom_roles.html#gitlab-saas) or the [member roles API](https://docs.gitlab.com/ee/api/member_roles.html#add-a-member-role-to-a-group).\n\n```\n$ export CUSTOM_ROLE_ID=1000782\n$ echo $CUSTOM_ROLE_ID\n1000782\n```\n\n* The ID of your group or project. You can obtain the group id from the [group UI](https://docs.gitlab.com/ee/user/group/#get-the-group-id) or using the [groups API](https://docs.gitlab.com/ee/api/groups.html). You can obtain the project ID from the [project UI](https://docs.gitlab.com/ee/user/project/working_with_projects.html#access-the-project-overview-page-by-using-the-project-id) or using the [projects API](https://docs.gitlab.com/ee/api/projects.html).\n\n```\n$ export GROUP_ID=10087220\n$ echo $GROUP_ID\n10087220\n\n$ export PROJECT_ID=45738177\n$ echo $PROJECT_ID\n45738177\n```\n\n4. 
Associate the guest user with the custom role using the appropriate [group or project APIs](https://docs.gitlab.com/ee/api/members.html#edit-a-member-of-a-group-or-project).\n\n* If the user just needs to role in a project, update the project membership:\n\n```\n\"Authorization: Bearer $TOKEN\" --data '{\"member_role_id\": $CUSTOM_ROLE_ID, \"access_level\": 10}' \"https://gitlab.example.com/api/v4/projects/$PROJECT_ID/members/$USER_ID\"\n```\n\n* If the user just needs to role in a group, update the group membership:\n\n```\n$ curl --request PUT --header \"Content-Type: application/json\" --header \"Authorization: Bearer $TOKEN\" --data '{\"member_role_id\": $CUSTOM_ROLE_ID, \"access_level\": 10}' \"https://gitlab.example.com/api/v4/groups/$GROUP_ID/members/$USER_ID\"\n```\n\nNow that the custom role has been applied to a guest user, when they login, they can see the Vulnerability dashboard present in the Secure tab. Notice, however, that they are still not allowed to see the source code. \n\nThis is useful because it allows users to audit the system without being able to make changes to the code base, which applies the PoLP for those auditing the system for vulnerabilities.\n\n## Security policies\nGitLab provides [security policies](https://docs.gitlab.com/ee/user/application_security/policies/) to help you achieve least privilege access. 
There are two different types of security policies provided by GitLab:\n- [Scan Execution policies](https://docs.gitlab.com/ee/user/application_security/policies/scan-execution-policies.html) allow project maintainers and administrators the confidence of knowing that the scans they set up have not been changed, altered, or disabled.\n- [Merge Request Approval policies](https://docs.gitlab.com/ee/user/application_security/policies/scan-result-policies.html) prevent insecure code from being merged into production without appropriate approval.\n\nSome examples of how both policy types can be used in unison to provide least privilege access are as follows:\n- remove the ability for developers to disable security scanners\n- remove the ability for developers to merge insecure code\n\nPolicies are stored in a separate repo from the project they are being applied to called the Security Policy Project (SPP). This allows for separate permissions to be set to the SPP vs. the application repo, thus strengthening your ability to separate duties and apply PoLP.\n\n![Security policy hierarchy](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image19_aHR0cHM6_1750099455074.png)\n\n\u003Ccenter>\u003Ci>Security policy hierarchy\u003C/i>\u003C/center>\u003Cp>\u003C/p>\n\nTo enforce the policies contained in an SPP you link it to a project, subgroup, group, or multiples of each. An SPP can contain multiple policies but they are enforced together. An SPP enforced on a group or subgroup applies to everything below the hierarchy, including all subgroups and their projects.\n\nSecurity policies can be managed via the policy management UI as well as via yaml. 
Using the policy editor you can create, edit, and delete policies.\n\n![Policy management interface](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image20_aHR0cHM6_1750099455076.png)\n\n\u003Ccenter>\u003Ci>Policy management interface\u003C/i>\u003C/center>\u003Cp>\u003C/p>\n\nFeel free to leverage the [Simple Notes demo environment](https://gitlab.com/gitlab-de/tutorials/security-and-governance/devsecops/simply-vulnerable-notes) to try this yourself by following the provided [DevSecOps tutorial](https://gitlab-de.gitlab.io/tutorials/security-and-governance/devsecops/simply-vulnerable-notes/).\n\n### Creating a Scan Execution policy\nNow let's take a look at how to create a Scan Execution policy. Before getting started make sure you have met the following criteria:\n- GitLab Ultimate tier in the top-level group\n- Owner role to create/assign an SPP\n- Developer role or greater to create/edit/delete individual security policies\n\nWe will be creating a policy that automatically runs a SAST scan with each pipeline, regardless of the SAST template is defined within the gitlab-ci.yml:\n\n1. On the left sidebar, select **Search or go to** and search for the project to which you wish to add a policy.\n2. On the project left sidebar, go to **Secure > Policies**.\n3. Select **New policy**.\n4.  In the **Scan Execution Policy** section, select **Select policy**.\n5. 
Complete the fields:\n    - **Name:** The name of the policy\n    - **Description:** The description of the Policy\n    - **Policy status:** Whether it is enabled or not\n    - **Actions:** What actions to take when the defined conditions are met\n\n![Scan Execution policy actions](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image15_aHR0cHM6_1750099455077.png)\n\n \u003Ccenter>\u003Ci>Scan Execution policy actions\u003C/i>\u003C/center>\u003Cp>\u003C/p>\n\n  - **Conditions:** Conditions which must be met (a pipeline is triggered or on a set schedule) in order for an action to take place.\n\n    ![Scan Execution policy conditions](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750099455078.png)\n \u003Ccenter>\u003Ci>Scan Execution policy conditions\u003C/i>\u003C/center>\u003Cp>\u003C/p>\n\n- Press the **Configure with a merge request** button.\n\nNow that the policy has been created, all we need to do is run a pipeline to see that SAST will be present even if it is not defined in the .gitlab-ci.yml.\n\n### Creating a Merge Request Approval policy\n\nNow let's take a look at how to create a Merge Request Approval policy. Before getting started make sure you have met the following criteria:\n- GitLab Ultimate tier in the top-level group\n- Owner role to create/assign an SPP\n- Developer role or greater to create/edit/delete individual security policies\n- Security scanners added to project\n\nWe will be creating a policy that requires approval from project maintainers if any security scanner detects a vulnerability when compared with any branch:\n\n1. On the left sidebar, select **Search or go to** and search for the project to which you wish to add a policy.\n2. On the project left sidebar, go to **Secure > Policies**\n3. Select **New policy**\n4. 
In the **Merge Request Approval policy** section, select **Select policy**.\n5. Complete the fields:\n    - **Name:** The name of the policy\n    - **Description:** The description of the policy\n    - **Policy status:** Whether it is enabled or not\n    - **Rules:** The conditions which must be met for an action (require approval) to take place.\n\n![Merge Request Approval policy rules](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image17_aHR0cHM6_1750099455079.png)\n\n\u003Ccenter>\u003Ci>Merge Request Approval policy rules\u003C/i>\u003C/center>\u003Cp>\u003C/p>\n\n   - **Actions:** The action to be taken whenever the conditions in the rules (defined vulnerabilities/licenses detected) are met.\n\n![Merge Request Approval  policy actions](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750099455080.png)\n\u003Ccenter>\u003Ci>Merge Request Approval  policy actions\u003C/i>\u003C/center>\u003Cp>\u003C/p>\n\n   - **Override project approval settings:** If selected, the following choices will overwrite project settings but only affect the branches selected in the policy.\n\n![Merge Request Approval policy approval settings](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image21_aHR0cHM6_1750099455081.png)\n\n\u003Ccenter>\u003Ci>Merge Request Approval policy approval settings\u003C/i>\u003C/center>\u003Cp>\u003C/p>\n\n6. Press the **Configure with a merge request** button.\n\nNow that the policy has been created, all we need to do is run a pipeline and if SAST detects any vulnerabilities then approvals will be required from the selected approver before the code change can be merged. 
Merge Request Approval policies can be used with all GitLab security scanners, including license scanning.\n\n![Merge Request Approval policies blocking code from being merged in an MR](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750099455082.png)\n\n\u003Ccenter>\u003Ci>Merge Request Approval policies blocking code from being merged in an MR\u003C/i>\u003C/center>\u003Cp>\u003C/p>\n\n## Branch protections and Code Owners\n[Branch protections](https://docs.gitlab.com/ee/user/project/protected_branches.html) allow you to impose additional restrictions on particular branches within your repository. This further strengthens the PoLP for the interactions on a particular set of branches. \n\nFor example, a protected branch can control:\n- which users can merge into the branch\n- which users can push to the branch\n- if users can force push to the branch\n- if changes to files listed in the CODEOWNERS file can be pushed directly to the branch\n- which users can unprotect the branch\n\n### Applying branch protections\n\nBranch protections are available in all tiers and offerings of GitLab. Branch protections can be applied to a single project or a group of projects. You can apply branch protections for required roles to push and merge as follows:\n\n1. On the left sidebar, select **Search or go to** and find your project or group.\n2. Select **Settings > Repository**.\n3. Expand **Protected branches**.\n4. Select **Add protected branch**.\n    - For groups, from the **Branch** text box, type the branch name or a wildcard.\n    - For projects, from the **Branch** dropdown list, select the branch you want to protect.\n5. From the **Allowed to merge** list, select a role that can merge into this branch.\n6. From the **Allowed to push and merge** list, select a role that can push to this branch.\n7. 
Select **Protect**.\n\nYou should now see the protected branch added to the list.\n\n![Protected branches settings](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image14_aHR0cHM6_1750099455082.png)\n\n\u003Ccenter>\u003Ci>Protected branches settings\u003C/i>\u003C/center>\u003Cp>\u003C/p>\n\nThe Owner role is required to add branch protections to a group and the Maintainer role or greater is required to add branch protections to a project.\n\n### Code Owners\nIf you want to further limit what files developers can perform changes on, one of the best features to implement is [Code Owners](https://docs.gitlab.com/ee/user/project/codeowners/). Code Owners allows you to define who has the expertise for specific parts of your project’s codebase. Defining the owners of files and directories in Code Owners will:\n\n- require owners to approve changes as well as merge requests before they merge into a protected branch\n- identify owners by displaying the Code Owner names on the files and directories they own\n\nTo set up Code Owners, follow these steps:\n1. Create a CODEOWNERS file in your preferred location.\n2. Define some rules in the file following the Code Owners syntax reference. You can configure all eligible approvers' approval rules and require Code Owner approval on a protected branch.\n3. 
Commit your changes, and push them up to GitLab.\n\nNow, when looking at files, you can see who the Code Owners are for a particular file.\n\n![Code Owners displayed for file](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750099455083.png)\n\n\u003Ccenter>\u003Ci>Code Owners displayed for file\u003C/i>\u003C/center>\u003Cp>\u003C/p>\n\nIf you implement Code Owner approvals, then when creating a merge request, the Code Owners must approve before the code can be merged.\n\n![Code Owners approvals](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750099455084.png)\n\n\u003Ccenter>\u003Ci>Code Owners approvals\u003C/i>\u003C/center>\u003Cp>\u003C/p>\n\n### Additional approval settings\nThere are additional approval settings that can be applied before code can be committed with a merge request. These additional approval settings are as follows:\n- prevent approval by author\n- prevent approvals by users who add commits\n- prevent editing approval rules in merge requests\n- require user re-authentication (password or SAML) to approve\n\nAdditionally, whenever a commit is added, you can:\n- keep approvals\n- remove all approvals\n- remove approvals by Code Owners if their files changed\n\n![Additional Approval settings](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image12_aHR0cHM6_1750099455084.png)\n\n\u003Ccenter>\u003Ci>Additional Approval settings\u003C/i>\u003C/center>\u003Cp>\u003C/p>\n\nTo configure additional approval settings you can perform the following steps:\n1. On the left sidebar, select **Search or go to** and find your project.\n2. Select **Settings > Merge requests**.\n3. Scroll down to the **Merge request approvals** section.\n4. Under **Approval settings** select the approval settings you would like to apply.\n5. 
Press the **Save changes** button.\n\nThese can also be applied to your top-level group by performing the following steps:\n1. On the left sidebar, select **Search or go to** and find your top-level group.\n2. Select **Settings > General**.\n3. Expand the **Merge request approvals** section.\n4. Under **Approval settings** select the approval settings you would like to apply.\n5. Press the **Save changes** button.\n\nBy leveraging these approval settings you can make sure that code always obtains oversight by a person who was not involved in creating the code, thereby preventing a conflict of interest.\n\n## Compliance pipelines and frameworks\nYou can create a compliance framework that is a label to identify that your project has certain compliance requirements or needs additional oversight. The label can optionally enforce compliance pipeline configuration to the projects on which it is applied.\n\nFeel free to leverage the [Compliance Frameworks Demo](https://gitlab.com/gitlab-de/tutorials/security-and-governance/compliance-frameworks) group to see an example of compliance frameworks and their usage.\n\n### Create a compliance pipeline\nTo create a compliance pipeline, all you need to do is create a new project which will store a `.gitlab-ci.yml` file that we wish to use in another project. The new compliance pipeline project can have separate permissions from the project to which you will apply it. This is beneficial because it prevents developers from making changes to pipelines that must run.\n\nYou can see I have created the following [pipeline definition](https://gitlab.com/gitlab-de/tutorials/security-and-governance/compliance-frameworks) which:\n- runs the SAST security scanner\n- runs the secret detection scanner\n- runs a SOC2 compliance job\n- runs the original pipeline defined in the project to which we will apply this pipeline. 
This allows developers to focus on the actual application development and the compliance team to focus on defining the SOC2 rules.\n\n### Create and apply a compliance framework\nNow that the compliance pipeline for SOC2 has been defined, we must define a compliance framework and apply it to our project. In this case, I will apply it to my Accounting Department project.\n\nTo create a compliance framework label, follow these steps:\n1. On the left sidebar, select **Search or go to** and find your group.\n2. Select **Settings > General**.\n3. Expand the **Compliance frameworks** section.\n4. Click the **Add framework** button.\n5. Create a new compliance framework and populate the following sections:\n    - **Name:** The name of your compliance framework\n    - **Description:** A description of your compliance framework\n    - **Compliance pipeline configuration:** The location of the compliance pipeline to run. \n    - **Background color:** A color for the compliance framework label\n\n![PoLP - image 15](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image11_aHR0cHM6_1750099455085.png)\n\n   \u003Ccenter>\u003Ci>Creating a compliance framework\u003C/i>\u003C/center>\u003Cp>\u003C/p>\n\n6. Press the **Add framework** button.\n\nAnd now you should see your newly added framework under active compliance frameworks.\n\n![Active compliance frameworks](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image9_aHR0cHM6_1750099455085.png)\n\n\u003Ccenter>\u003Ci>Active compliance frameworks\u003C/i>\u003C/center>\u003Cp>\u003C/p>\n\nNow let’s go ahead and assign this compliance label to our Accounting Department project:\n\n1. On the left sidebar, select **Search or go to** and find your project.\n2. Select **Settings > General**.\n3. Expand **Compliance frameworks**.\n4. 
Select the compliance framework created above.\n\n![Adding a compliance framework](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750099455086.png)\n\n\u003Ccenter>\u003Ci>Adding a compliance framework\u003C/i>\u003C/center>\n\n5. Select **Save changes**.\n\nThe project should now have the compliance framework label applied. \n\n![Project running a compliance pipeline](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image10_aHR0cHM6_1750099455086.png)\n\n\u003Ccenter>\u003Ci>Project running a compliance pipeline\u003C/i>\u003C/center>\u003Cp>\u003C/p>\n\nThis enables separation of duties and prevents compliance pipelines from being altered by those without permissions.\n\n## Security Policy Scope and Pipeline Execution\nOver the past several releases, GitLab has introduced two experimental features, Security Policy Scope and Pipeline Execution, to make it even easier to adhere to PoLP. These features are very similar to Compliance Pipelines and Compliance Frameworks and can be managed from GitLab’s security policy UI.\n\n**Note:** These features are currently considered experimental. An experiment is a feature that is in the process of being developed. It is not production ready. We encourage users to try experimental features and provide feedback.\n\nThe [pipeline execution policy action](https://docs.gitlab.com/ee/user/application_security/policies/scan-execution-policies.html#pipeline-execution-policy-action) introduces a new scan action type into Scan Execution policies for creating and enforcing custom CI in your target development projects. You can execute a custom pipeline along with your current pipeline. 
This allows you to enforce compliance by always forcing particular actions to run that are not just security scanners and that cannot be overwritten by those without permissions.\n\n![Pipeline Execution policy scope selection](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image18_aHR0cHM6_1750099455087.png)\n\u003Ccenter>\u003Ci>Pipeline Execution policy scope selection - insert code block\u003C/i>\u003C/center>\u003Cp>\u003C/p>\n\n![Pipeline Execution policy scope selection](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image13_aHR0cHM6_1750099455087.png)\n\u003Ccenter>\u003Ci>Pipeline Execution policy scope selection - link existing CI file\u003C/i>\u003C/center>\u003Cp>\u003C/p>\n\nThe [Security policy scope](https://docs.gitlab.com/ee/user/application_security/policies/scan-result-policies.html#security-policy-scopes) can be applied to either Merge Request Approval or Scan Execution policies. Scopes enable you to administer policies with a particular scope, meaning you can:\n\n- Include only projects containing a compliance framework label\n- Include or exclude selected projects from enforcement\n\nTo enable these experimental features, follow these steps:\n1. On the left sidebar, select **Search or go to** and find your top-level group.\n2. Select **Settings > General**.\n3. Expand **Permissions and group features**.\n4. Scroll down to the **Security policy management** section.\n5. Select the following checkboxes\n**Security policy pipeline execution action:** Create and enforce custom CI jobs and scripts using this new policy action.\n6. **Security policy scopes:** Granularly scope each policy you create to projects containing a compliance framework label, or a list of projects.\n7. **Enforce for all subgroups (optional):** Subgroups cannot change these settings.\n8. 
Scroll down to the **Experiment and Beta features** section.\n9. Select the **Use Experiment and Beta features** checkbox.\n10. Scroll down and press the **Save changes** button.\n\nNow, whenever you are creating a security policy, the following options will be available:\n\n- Inserting a CI code block (Scan Execution policy only)\n- Loading CI/CD code from file (Scan Execution policy only)\n- Linking an existing CI file from another project (Scan Execution policy only)\n- Scoping a policy to projects with selected compliance framework (Group Level only)\n- Scoping a policy towards specific projects (Group Level only)\n- Scoping a policy towards all projects in group (Group Level only)\n\nTo learn more about these features, check out the following documentation:\n- [Pipeline Execution Policy action (Scan Execution policy)](https://docs.gitlab.com/ee/user/application_security/policies/scan-execution-policies.html#pipeline-execution-policy-action)\n- [Security Policy Scopes (Scan Execution policy)](https://docs.gitlab.com/ee/user/application_security/policies/scan-execution-policies.html#security-policy-scopes)\n- [Security Policy Scopes (Merge Request Approval policy)](https://docs.gitlab.com/ee/user/application_security/policies/scan-result-policies.html#security-policy-scopes)\n\n## Additional resources\n\nThanks for reading! These are some of the ways that GitLab allows you to strengthen your organization's security posture through the enablement of PoLP. 
To learn more about GitLab and the other ways we can strengthen your organization's security throughout all parts of the SDLC, check out the following links:\n\n- [GitLab Security and Compliance](https://about.gitlab.com/solutions/security-compliance/)\n- [GitLab Application Security Documentation](https://docs.gitlab.com/ee/user/application_security/)\n- [GitLab DevSecOps Demo Project](https://gitlab.com/gitlab-de/tutorials/security-and-governance/devsecops/simply-vulnerable-notes)\n- [GitLab DevSecOps Tutorial](https://gitlab-de.gitlab.io/tutorials/security-and-governance/devsecops/simply-vulnerable-notes/)\n- [GitLab Roles and Permissions Documentation](https://docs.gitlab.com/ee/user/permissions.html)\n- [GitLab Custom Roles Documentation](https://docs.gitlab.com/ee/user/custom_roles.html)\n- [GitLab Security Policies Documentation](https://docs.gitlab.com/ee/user/application_security/policies/)\n- [GitLab Compliance Frameworks Documentation](https://docs.gitlab.com/ee/user/group/compliance_frameworks.html)\n- [GitLab Code Owners Documentation](https://docs.gitlab.com/ee/user/project/codeowners/)\n- [GitLab Branch Protections Documentation](https://docs.gitlab.com/ee/user/project/protected_branches.html)",[9,765,681,846],"features",{"slug":848,"featured":91,"template":685},"the-ultimate-guide-to-least-privilege-access-with-gitlab","content:en-us:blog:the-ultimate-guide-to-least-privilege-access-with-gitlab.yml","The Ultimate Guide To Least Privilege Access With 
Gitlab","en-us/blog/the-ultimate-guide-to-least-privilege-access-with-gitlab.yml","en-us/blog/the-ultimate-guide-to-least-privilege-access-with-gitlab",{"_path":854,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":855,"content":861,"config":867,"_id":869,"_type":14,"title":870,"_source":16,"_file":871,"_stem":872,"_extension":19},"/en-us/blog/zero-trust-at-gitlab-data-zones-and-authentication-scoring",{"title":856,"description":857,"ogTitle":856,"ogDescription":857,"noIndex":6,"ogImage":858,"ogUrl":859,"ogSiteName":670,"ogType":671,"canonicalUrls":859,"schema":860},"Zero Trust at GitLab: Data zones & authentication scoring","How we're defining and aligning data zones in our Zero Trust implementation.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680808/Blog/Hero%20Images/fabio-oyXis2kALVg-unsplash.png","https://about.gitlab.com/blog/zero-trust-at-gitlab-data-zones-and-authentication-scoring","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Zero Trust at GitLab: Mitigating challenges with data zones and authentication scoring\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mark Loveless\"}],\n        \"datePublished\": \"2019-09-06\",\n      }",{"title":862,"description":857,"authors":863,"heroImage":858,"date":864,"body":865,"category":681,"tags":866},"Zero Trust at GitLab: Mitigating challenges with data zones and authentication scoring",[719],"2019-09-06","\n\nUpdate: This is part 4 of an ongoing [Zero Trust series](/blog/tags.html#zero-trust). See our next post: [Zero Trust at GitLab: Implementation challenges (and a few solutions)](/blog/zero-trust-at-gitlab-implementation-challenges/).\n{: .alert .alert-info .note}\n\n\nZero Trust is the practice of shifting access control from the perimeter of the organization to the individuals, the assets, and the endpoints. 
For GitLab, Zero Trust means that all devices trying to access an endpoint or asset within our GitLab environment will need to authenticate and be authorized. This is part four of a multi-part series.\n\n* Part one: [The evolution of Zero Trust](/blog/evolution-of-zero-trust/)\n* Part two: [Zero Trust at GitLab: problems, goals, and coming challenges](/blog/zero-trust-at-gitlab-problems-goals-challenges/)\n* Part three: [Zero Trust at GitLab: The data classification and infrastructure challenge](/blog/zero-trust-at-gitlab-the-data-classification-and-infrastructure-challenge/)\n\n\nIn previous blog posts we’ve covered both the history of the whole Zero Trust Networking (ZTN) scenario, and some of the areas where we expect challenges to successful implementation. In this post we’ll discuss the concept of “data zones” as well as an “authentication scoring system.” The general concept of data zones is not new; it is an established part of a layered security approach where zones of trust are created around groups of data, usually on the same network segment or system. A few things to note:\n* Our data zone concept simply groups the data according to access controls available for a system when granular control is not possible.\n* Our authentication scoring system is intended to augment our ability to allow access.\n* We’ve set up all of our access based upon the team member’s identity and job description, but it should also include information about the device and even the geographic location of the team member (as we shall see later).\n\n## Defining data zones\nWe have previously defined the [classification of data](/handbook/security/data-classification-standard.html) to include RED, ORANGE, YELLOW, and GREEN. We’ve also touched on the concept of [moving data either via automated or manual means, and data being transformed](/blog/zero-trust-at-gitlab-the-data-classification-and-infrastructure-challenge/). 
Where the data is stored should reflect the classification.\n\nThe immediate challenge with regards to data access is when data that is considered RED or ORANGE is stored on a system that has limited access controls, and granting granular access isn’t possible. In other words, we need to define zones where multiple classes of data may reside on a system that is unable to provide separation of access controls based upon our own data classification. The most common scenario will be either a legacy system or a system developed outside of our control, such as a SaaS company offering.\n\nWe’ve defined four zones that match the data classifications, and named them after the colors of the data classification:\n\n* RED ZONE for RED and lower data\n* ORANGE ZONE for ORANGE and lower data\n* YELLOW ZONE for YELLOW and lower data\n* GREEN ZONE for GREEN (this may not be needed as it is the lowest classification)\n\nIn general, the zones apply to data at rest. Data in transit, either transitioning in system memory between subsystems or transferred over a network between systems, defaults to RED ZONE since access at that level is considered critical. Therefore the ability to access systems at a low enough level to examine RAM or monitoring calls between systems is definitely considered the highest level of restriction, and data moving between systems is subject to the highest levels of encryption.\n\n### Here are the basic rules for a zone:\n\n* A zone can contain its own “color” of data or lower, not higher.\n* A zone will not allow access to a lower “color” of data within its boundaries without authorization to access the highest designation of “color” for that zone.\n* The boundaries of a zone are defined by the access controls specific to that zone.\n\nTo illustrate, if a YELLOW ZONE is set up to contain YELLOW data, it cannot contain RED or ORANGE. 
And while it can contain GREEN data, team members with access to GREEN cannot access that data while it resides in the YELLOW ZONE. **In essence, each zone where data resides must have controls that consider what data they might possibly contain.**\n\nTo explain this further, let’s say that there is a database that contains ORANGE and YELLOW data within it, but the controls in place are not granular – access to the database means access to all of the data contained within it. Therefore this database would be considered ORANGE ZONE, and those with access to only YELLOW data could not be allowed to access that data in this database because it is in ORANGE ZONE.\n\n## Authentication scoring\nThere will be a scoring system for access to data, where a team member is ranked. This isn’t so much an actual system for points, but more of a reference guide on what it takes to be able to access different data.\n\n![Authentication scoring reference guide](https://about.gitlab.com/images/blogimages/authentication-scoring.png){: .shadow.small.center}\n\n### The earning of points is as follows:\n\n#### Basic access\n\nOne point for basic authentication. This gets you access to the GREEN ZONE and GREEN data.\n\n#### Basic access with U2F\n\nOne point if second factor authentication comes through the proper channel (for GitLab team members that is Okta with approved MFA, such as U2F). Two points are required to access the YELLOW ZONE and YELLOW data, so this, coupled with the previous one point for authentication, allows the access.\n\n#### Managed device\n\nOne more point if the authentication comes via a managed device (a device GitLab has issued to the team member). This is sufficient to allow access to ORANGE ZONE and ORANGE data.\n\n#### Healthy managed device\n\nIf the managed device is in proper health (passes checks for patches, proper configuration, etc) an additional point is given, which allows access to the RED ZONE and RED data. 
This is not to imply that we will allow “unhealthy devices” to access ORANGE data (for example), but that the requirement is strictly enforced for RED ZONE and RED data.\n\n#### Geolocation\n\nA final point is given for a managed device with proper health from proper geolocation (this is dependent on the particular RED data being accessed). There may be a requirement that specific data can only be accessed from specific countries, and this is to account for that.\n\n## A summary and what’s next\nIt should be apparent at this point we have a fairly complex situation to administer. We do protect our data but we want more granular control over the access to the data. In a lot of organizations, administrators will end up denying access to parts of their system to employees, and end up having to export portions of the data to those denied access. Additionally, sometimes administrators will grant too much access to employees who simply need to access small segments and do not need the full access.\n\n**At GitLab we not only want to avoid that, but we want to document, log, and automate as much of the granular control as possible. This makes other challenges such as onboarding, offboarding, provisioning of access devices, auditing, and other processes much easier. And if it is easier on both the team member and the administrators managing the systems, full adoption is much simpler. The last thing GitLab wants to do is to prevent or curtail the rapid growth we are experiencing.**\n\nDesigning data zones and coming up with an authentication method to gain access to the data in its zone will help clarify how we want to approach some of the challenges. We have a decent start, but to fully explain how they will need to be applied, we’ll go into a lot more detail in the next post. 
We’ll also discuss some specific ways to address challenges involving our infrastructure, including the role of managed devices.\n\n*Special shout-out to the entire security team for their input on this blog series.*\n\nPhoto by [fabio](https://unsplash.com/@fabioha?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com)\n{: .note}\n",[723,681,9],{"slug":868,"featured":6,"template":685},"zero-trust-at-gitlab-data-zones-and-authentication-scoring","content:en-us:blog:zero-trust-at-gitlab-data-zones-and-authentication-scoring.yml","Zero Trust At Gitlab Data Zones And Authentication Scoring","en-us/blog/zero-trust-at-gitlab-data-zones-and-authentication-scoring.yml","en-us/blog/zero-trust-at-gitlab-data-zones-and-authentication-scoring",{"_path":874,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":875,"content":881,"config":886,"_id":888,"_type":14,"title":889,"_source":16,"_file":890,"_stem":891,"_extension":19},"/en-us/blog/zero-trust-at-gitlab-implementation-challenges",{"title":876,"description":877,"ogTitle":876,"ogDescription":877,"noIndex":6,"ogImage":878,"ogUrl":879,"ogSiteName":670,"ogType":671,"canonicalUrls":879,"schema":880},"Zero Trust at GitLab: Implementation challenges (and a few solutions)","Implementing change in an already working environment always brings its fair share of growing pains. 
What happens when that change is Zero Trust?","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665982/Blog/Hero%20Images/jpvalery-9pLx0sLli4unsplash.jpg","https://about.gitlab.com/blog/zero-trust-at-gitlab-implementation-challenges","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Zero Trust at GitLab: Implementation challenges (and a few solutions)\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mark Loveless\"}],\n        \"datePublished\": \"2019-10-02\",\n      }",{"title":876,"description":877,"authors":882,"heroImage":878,"date":883,"body":884,"category":681,"tags":885},[719],"2019-10-02","\n\nUpdate: This is part 5 of an ongoing [Zero Trust series](/blog/tags.html#zero-trust). See our next and final post in this series: [Zero Trust at GitLab: Where do we go from here?](/blog/zero-trust-at-gitlab-where-do-we-go-from-here/).\n{: .alert .alert-info .note}\n\n\n*Zero Trust is the practice of shifting access control from the network perimeter to the assets, individuals, and the respective endpoints. For GitLab, Zero Trust means that all users and devices trying to access an endpoint or asset within our GitLab environment will need to authenticate and be authorized. 
This is part five of a multi-part series.*\n\nCheck out these other posts to get up to speed:\n* Part one: [The evolution of Zero Trust](/blog/evolution-of-zero-trust/)\n* Part two: [Zero Trust at GitLab: Problems, goals, and coming challenges](/blog/zero-trust-at-gitlab-problems-goals-challenges)\n* Part three: [Zero Trust at GitLab: The data classification and infrastructure challenge](/blog/zero-trust-at-gitlab-the-data-classification-and-infrastructure-challenge/)\n* Part four: [Zero Trust at GitLab: Mitigating challenges with data zones and authentication scoring](/blog/zero-trust-at-gitlab-data-zones-and-authentication-scoring/)\n\nAs with most things at GitLab, we’re taking a very open approach to implementing Zero Trust. We’ve tackled everything from the evolution of Zero Trust to the expected challenges and our planned work-arounds.  However, maybe we haven’t yet addressed a ZTN related topic, question or consideration that you’re interested in discussing.\n\nWe’ve been discussing how Zero Trust Networking (ZTN) presents GitLab with a series of challenges, and have suggested a few mitigation strategies. In order to fully understand some of these challenges and how to approach them we’ll need to drop a bit deeper into the details.\n\n## Immediate Challenges\n\nThe first major hurdle that comes with discussing Zero Trust Networking (ZTN) is a classic one: Getting the plan implemented. Any security professional who has tried to implement changes to an already working environment has experienced these growing pains. GitLab is an extremely forward-thinking organization and we're constantly implementing massive changes to our software. But this still doesn’t mean everyone welcomes every change with open arms.\n\nCurrently, things work. We have an environment that is remarkably stable and pretty secure despite all the changes. When the security department starts rumbling about certain types of changes, there is resistance. 
So we have to look at things a bit differently to get some of our ideas implemented. How do we do this?\n\n## Real Problems\nWe’ve previously discussed areas where we anticipated problems, but what we really need to do is look at existing problems and work out solutions. If we can get some hash marks in the “win” column for ZTN, it helps prove that ZTN is a rational approach for security. If we can solve some pressing problems along the way (or make older, less robust solutions better) it improves the general appeal for ZTN. It is one thing to expect resistance, it is another to encounter it. Security changes need to make things easier for the end-user, otherwise, the end-user will fight and try to bypass what are perceived as roadblocks to productivity. We can’t make every single person happy, but we can try to make as many users as happy as possible while making every single person a bit safer. That said, we encountered a bit of resistance in a few areas.\n\n### User identity\nIn the past, we’ve had issues with provisioning user accounts – we’d need to get a team member set up in all of the systems as quickly as possible. When the entire company had 35 people this was not that great of a burden. But right now the Security Department alone has 35+ people (and counting, we’re [hiring](/jobs/), hint hint) and we’ve had times where 35 people started at GitLab in a single week.\n\nWe’re growing! Any time we make changes to the process of user-identity, we have to keep in mind that most of the departments are more concerned with provisioning new users than actual user identity. Their main goal is to get new team members productive as quickly as possible, so access to systems immediately is crucial. Ideally, any solution for user-identity should work seamlessly with the entire user lifecycle – provisioning through deprovisioning – without disruption to company productivity.\n\n### Device management\nWe have issues with both device identification and device management. 
We need to strike a balance between ensuring team members have access to the tools they need to perform their jobs, and simply allowing team members to use whatever computing device they want to complete tasks and maintain productivity. Interestingly, this is one area where GitLab’s distinctive background as a company has created a rather unique challenge. We started as an open source project and only in the last couple of years have we been purchasing laptops for team members (for years it was [BYOD](https://en.wikipedia.org/wiki/Bring_your_own_device)). To help in this area, we’ve [standardized](/handbook/business-technology/team-member-enablement/onboarding-access-requests/#laptops) what the company will purchase for new team members (and older team members are certainly eligible for new systems). Having a standard platform is great. Our issue here is both a cultural one as well as a practical one.\n\nSince our roots are in BYOD, we cannot simply turn off BYOD overnight. In fact, I see a lot of benefits to BYOD in certain scenarios – typing up blog posts on a tablet in a coffee shop seems fine, code commits to critical systems are not. Anyone can contribute – that is a cultural core belief and our [mission](/company/mission/#mission) at GitLab. We have team members as well as non-employees that contribute to our code base, our handbook, and everything else we do. We don’t have some of the normal corporate standardization that a typical brick-and-mortar company might have, such as using the corporate-issued-laptop only with asset tracking and patch management built-in, forbidding the use of BYOD, and so on. 
We do have policies in place, but they are not proactively enforced because we lack the asset management solutions at the moment to do so at the level we desire.\n\nAs a security professional, I am thrilled we have standardized on Linux as our main infrastructure platform, Macs for team members, and engineering team members have a choice of Mac or Linux for the work laptop. No Microsoft Windows.\n\nHowever, trying to find a solution for asset management for our chosen platforms is a challenge. Most vendor solutions are Windows and Mac or Windows and Linux. There are some vendor solutions that support both Mac and Linux, but are often the more “Windows and Mac, and well, sort of Linux if you only run this ancient binary that dates back to an acquisition three years ago, I think Alice is still here from that acquisi- no wait she left” flavor.\n\nI haven’t even discussed phones. These are commonly used for various methods of multi-factor authentication, although we don’t currently have a good way to ensure the phones used for MFA are secured and fully patched. And many team members access work applications on their phones – email, Slack, Zoom, and Expensify, to name a few.\n\n### Sprawling infrastructure\nTo complicate things we have hundreds of servers/containers/instances on numerous cloud offerings spread around the world, and dozens of cloud-based “Something-or-other as a Service” offerings we use as a company. While we don’t administer all systems via [SSH](https://en.wikipedia.org/wiki/Secure_Shell) ([Chef](https://www.chef.io/) and [Knife](https://docs.chef.io/knife.html) are used heavily in our environment) there are still challenges with provisioning, and that we’re currently unable to enforce two factor for SSH. 
Yes, we can use Yubikeys to store keys and a few other tricks for SSH access, but getting things set up for team members to administer these systems is daunting.\n\n## Wins\nA lot of our problems with identity management at GitLab were solved by [implementing Okta](/handbook/business-technology/okta/#how-is-gitlab-using-okta), and entire departments were thrilled. Provisioning steps that took days had been reduced to minutes. [Okta](https://en.wikipedia.org/wiki/Okta_(identity_management)) has a number of features that supported our vision of ZTN, so if we can solve some ZTN problems with Okta, we’re doing it with a proven solution that people already use. If we can solve a problem with Okta it will be a much easier “sell” to the various impacted departments, and since we can implement a lot of our ZTN model with Okta, it is a win-win situation.\n\nWhile the Security Team felt that a number of security problems were solved with Okta, this was not how the product was “sold” to the rest of the company. The ZTN benefits were pitched as business solutions to existing business problems to the various business and application owners in GitLab – meeting provisioning and compliance needs. It was not sold as a security solution, and this approach worked well.\n\nOur use of Chef along with Knife has been a massive help with infrastructure changes, and has simplified a lot of the usual drudgery associated with system administration. For example, pushing code changes out to multiple systems is much simpler.\n\n_Can we apply any of the wins to our existing challenges?_\n\n### Enforcing Okta Everywhere\n\nBy trying to get the numerous SaaS solutions we use to only be accessed via Okta, we are looking to solve 70% (a WAG at best) of our issues in the SaaS area. This does not address everything. Some of the access to these systems requires not just traditional web-based access but API access as well. 
Not all systems integrate with Okta, or API access is completely separate, but this approach is working so far and things have gone reasonably well where Okta is implemented.\n\n### Linux-based Infrastructure\nIn the sprawling [infrastructure](/handbook/engineering/infrastructure/) arena, our greatest challenge is that some of our most critical assets are administered via SSH. As a result, we have issues with provisioning, deprovisioning, and enforcing other aspects of authentication that we take for granted with web-based assets. We are seriously looking at leveraging Okta and their [Advanced Server Access](https://www.okta.com/products/advanced-server-access/) (ASA) product, which looks like we could integrate SSH accounts into the Okta mix. Using ASA could allow for provisioning of a new administrator via group assignment. Since we get multi-factor, enforcement of GeoIP, and a few other bells and whistles via Okta, by using ASA we could resolve one of the hardest problems we currently face. This has the added benefit of making the compliance and auditing folk happy, to say nothing of just general time-savings.\n\nWhile ASA (and any similar product) requires we install software on the server side, we do have Chef and Knife to help with deployment. Rollout could happen quickly. Our main issues here would be performance impact and client software distribution, although a regulated testing period and a decent rollout plan could help solve those issues.\n\n### All those devices\nThis one is ugly. While moving more and more systems into Okta helps, it also emphasizes our biggest weakness – device management. After importing Okta logs into other systems for analysis, we can see what our team members are using to access GitLab assets. The good news is the majority of team members are using company-issued laptops, although we are not sure what patch level or configurations are in place. 
We do have company standards, but we do not have the level of control we’d like to ensure these standards are met. For example, we’d like to ensure that all team member systems accessing critical information ([RED data](/handbook/security/data-classification-standard.html#data-classification-levels)) are doing it from a company-issued system that is up-to-date on patches and is properly configured. We’d prefer to do it at the time of authentication, and not after the fact via log mining.\n\nPhones are already a touchy subject, since this is the main BYOD device allowed on most corporate networks. We use Expensify, and I cannot imagine using it without the phone app even though it is possible. I love using Okta Verify on my phone, and approving push multi-factor from my Apple Watch. I know Okta has some potential solutions, but unless there is a solution from any vendor that is BYOD friendly instead of full takeover MDM, I can’t imagine successfully selling it to fellow team members.\n\nThe main issue here is that device management is an important part of ZTN, and the tools to make this happen at the quality level we’d like don’t seem to exist. As stated earlier, we have a mixture of Mac and Linux desktops so we’d like one solution to make this happen, not two.\n\n## Conclusion\nWe did not intend for this blog post to be an Okta commercial, but it does happen to meet our needs for part of this whole ZTN equation. We’re still searching for a solution for asset management. I wish I could claim this was not a commercial for an asset management solution, because that is quite the challenge.\n\nWhile we still have a long way to go, we have a better handle on direction. Our points of resistance – both from team members impacted by change and technological limits based upon our environment – are showing where we need to focus but also how we need to approach things. 
Finding cultural and technological areas where we are doing well are huge strengths we can leverage, and by focusing our efforts on those areas, more of our environment benefits from Zero Trust.\n\nIf you’re implementing ZTN and have similar (or different) problems, I’d love to discuss. If you’ve got thoughts or a question, comment below, we’d love to hear from you.\n\n\n*Special shout-out to the entire security team for their input on this blog series.*\n\nPhoto by [Jp Valery](https://unsplash.com/@jpvalery?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n\n",[723,681,9],{"slug":887,"featured":6,"template":685},"zero-trust-at-gitlab-implementation-challenges","content:en-us:blog:zero-trust-at-gitlab-implementation-challenges.yml","Zero Trust At Gitlab Implementation Challenges","en-us/blog/zero-trust-at-gitlab-implementation-challenges.yml","en-us/blog/zero-trust-at-gitlab-implementation-challenges",{"_path":893,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":894,"content":900,"config":905,"_id":907,"_type":14,"title":908,"_source":16,"_file":909,"_stem":910,"_extension":19},"/en-us/blog/zero-trust-at-gitlab-problems-goals-challenges",{"title":895,"description":896,"ogTitle":895,"ogDescription":896,"noIndex":6,"ogImage":897,"ogUrl":898,"ogSiteName":670,"ogType":671,"canonicalUrls":898,"schema":899},"Zero Trust at GitLab: Problems, goals, and coming challenges","We map out our Zero Trust goals, the challenges we expect to encounter along the way, and how we plan to address them.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680704/Blog/Hero%20Images/beasty-ztblog-unsplash.jpg","https://about.gitlab.com/blog/zero-trust-at-gitlab-problems-goals-challenges","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Zero Trust at GitLab: 
Problems, goals, and coming challenges\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mark Loveless\"}],\n        \"datePublished\": \"2019-08-09\"\n      }",{"title":895,"description":896,"authors":901,"heroImage":897,"date":902,"body":903,"category":681,"tags":904},[719],"2019-08-09","\n\nUpdate: This is part 2 of an ongoing [Zero Trust series](/blog/tags.html#zero-trust). See our next post: [Zero Trust at GitLab: The data classification and infrastructure challenge](/blog/zero-trust-at-gitlab-the-data-classification-and-infrastructure-challenge/).\n{: .alert .alert-info .note}\n\n\n_Zero Trust is the practice of shifting access control from the perimeter of the organization to the\nindividuals, the assets and the endpoints. For GitLab, Zero Trust means that all devices trying\nto access an endpoint or asset within our GitLab environment will need to authenticate and\nbe authorized. This is part two of a multi-part series. Read the first post, [The evolution of Zero Trust](/blog/evolution-of-zero-trust/)._\n\nThe benefits of Zero Trust Networking (ZTN) on paper are ideal and many rush to\nimplement it with no idea exactly what it is (or how to get there).\nWe [previously discussed the evolution of ZTN](/blog/evolution-of-zero-trust/)\nand have been working on ways to implement things since then.\n\nVendors have appeared left and right, trying to sell their own versions of ZTN, but curiously no\ntwo seem to have the same interpretation. All of them say\nthey have a product that does ZTN and it is “easy to use,” but of course if you nail them down on\nthe implementation they all seem to fall short in one area or another.\n\n## Defining the problem\n\nI alluded to this above, but let’s clearly restate the problem – how does a diverse company\nimplement Zero Trust when no one can seem to agree on any of it? 
The most common issue for those\ntrying to roll out a ZTN is that, to be successful, everything must work together.\nThe basic principles of Zero Trust – positively identify the user, positively identify\nthe device accessed as managed and secure, ensure the user and the device\nare allowed to get into the asset they are trying to access, and do all of this in real time –\nare somehow very hard for most people to understand.\n\nWe probably should have clearly stated in our last blog post that the biggest problem with\n[the BeyondCorp solution](https://cloud.google.com/beyondcorp/) outlined by Google is that it is\ntailored for Google. This series\nof blog posts will take a closer look at the issues we are trying to solve, along with our\nworking approach. It is tailored for GitLab and won’t necessarily work everywhere else.\nHopefully it will provide insight into our thought process, encourage some discussion, and\npossibly help others along their own paths.\n\n## What we want\n\nImplementing ZTN at GitLab involves determining what we want out of this, which is really an\nextension of our objectives for [GitLab security](/handbook/security/#security-vision)\nin general. What we want to do is the following:\n\n- **Protect the data that needs to be protected.** Different types of data need to be protected at\ndifferent levels, so we must be able to have that flexibility.\n- **Positive team member identification.** When a team member authenticates, we need to know it\ntruly is that team member, and we need to know what the allowable data is. This needs\nto happen in real time.\n- **Positive device identification.** We need to identify the authenticated team member’s access\ndevice, and based upon the level of trust associated with that device, determine whether the\ndevice is allowed to access particular data, regardless of team member identity. 
This needs to\nhappen in real time.\n- **Geo-location identification.** We need to identify the team member’s location while at work\nand restrict access to certain data based upon team member geolocation. This\nneeds to happen in real time.\n- **Automated access.** We need to subject all automated processes that access data to the\nsame data protection policies as team members and devices. Again, this needs to happen in real time.\n- **Logging.** We need to properly log all transactions for auditing and monitoring purposes.\n- **No weakening of existing controls.** Data must be protected at rest and in transit. Any\nand all solutions should not detract from this.\n- **Security should make things easier, not harder.** If we do this correctly, the process\nwill be streamlined. Team members in general should be able to do their jobs effectively and\nquickly. Security should be so streamlined that the process is not cumbersome, as this tends\nto inspire some team members to try and bypass it.\n\n## Expected challenge areas\n\nWe’ve laid out what we want to do, and it was pleasant to discover in quite a few cases\nwe are already doing just that. We just lacked either the real-time component or we\nsimply had some type of inconvenient workaround to protect data that inhibited team members\nfrom doing their jobs easily. Based upon that knowledge and what we want to achieve, we’ve\ndevised a list of potentially challenging areas we may encounter:\n\n### Our network\n\nWe are a company that has no perimeter to speak of, as all team members are\nremote. In a way this is good, since [we don’t have a corporate\nVPN](/handbook/security/#why-we-dont-have-a-corporate-vpn) and therefore don't have to\nface dismantling it. 
But we do have to ensure that we maintain some semblance of control so we\nare assured that as a team member authenticates, it is done in a safe way and is independent\nof the network they are using.\n\n### Our apps and our data\n\nWe use a number of products, including our own DevOps\noffering. While we can control our own product and alter it to better serve our needs\n(and subsequently release said changes to our customers for their own needs), this does not\naddress the public cloud offerings our infrastructure is based on and that we use on\na regular basis: GCP, AWS, Azure, and Digital Ocean. Nor does it consider the variety of\ncontrols (and their variations) used to administer and secure these platforms. Solutions\nthat present themselves as working just fine on one cloud offering may not work the same\n(or at all) on another platform, which causes its own challenges. It also does not address\nadditional services such as Slack, Expensify, BambooHR, Zendesk, and others that contain\ndata we have to protect. Therefore, we need to extend our protection to cover our\ndata no matter where it resides.\n\n### It’s not just us\n\nWe sell services to customers including private groups and projects\non GitLab.com that need protecting from the public and whose access is restricted from\nus on a need-to-know basis.\n\n### Scaling\n\nWe’re growing at a rather accelerated rate, in terms of both customers and team\nmembers. All solutions to problems have to scale, including security solutions such as ZTN.\n\n### Our customers are global\n\nThere are contractual obligations, as well as regulatory\nand compliance issues, across the globe for our customers that need to be observed.\n\n### Our team members are also global\n\nThere are unique issues across our diverse team\nmember base, residing in more than a quarter of all countries on the planet. 
Each country has its\nown regulations, standards, and needs.\n\n## Coming next\n\nWe think some of these challenge areas might look familiar to many of you, and while we\nhope this post has been useful we're definitely heading into some deep and murky water going forward!\nIn the next post, we’ll take a dive into the deep end of this far-from-straightforward issue and look at one of the more\nchallenging areas: our data and the infrastructure it resides upon.\n\n*Special shout-out to the entire security team for their input on this entire blog series.*\n\nPhoto by [beasty](https://unsplash.com/@beastydesign?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[723,681,9],{"slug":906,"featured":6,"template":685},"zero-trust-at-gitlab-problems-goals-challenges","content:en-us:blog:zero-trust-at-gitlab-problems-goals-challenges.yml","Zero Trust At Gitlab Problems Goals Challenges","en-us/blog/zero-trust-at-gitlab-problems-goals-challenges.yml","en-us/blog/zero-trust-at-gitlab-problems-goals-challenges",{"_path":912,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":913,"content":919,"config":924,"_id":926,"_type":14,"title":927,"_source":16,"_file":928,"_stem":929,"_extension":19},"/en-us/blog/zero-trust-at-gitlab-the-data-classification-and-infrastructure-challenge",{"title":914,"description":915,"ogTitle":914,"ogDescription":915,"noIndex":6,"ogImage":916,"ogUrl":917,"ogSiteName":670,"ogType":671,"canonicalUrls":917,"schema":918},"Zero Trust at GitLab: The data classification and infrastructure challenge","The classification of data is a huge step in the right direction when it comes to handling Zero Trust, but it comes with its own set of 
challenges.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679055/Blog/Hero%20Images/close-up-colorful-colors-40799.jpg","https://about.gitlab.com/blog/zero-trust-at-gitlab-the-data-classification-and-infrastructure-challenge","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Zero Trust at GitLab: The data classification and infrastructure challenge\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mark Loveless\"}],\n        \"datePublished\": \"2019-08-21\"\n      }",{"title":914,"description":915,"authors":920,"heroImage":916,"date":921,"body":922,"category":681,"tags":923},[719],"2019-08-21","\nUpdate: This is part 3 of an ongoing [Zero Trust series](/blog/tags.html#zero-trust). See our next post: [Zero Trust at GitLab: Mitigating challenges with data zones and authentication scoring](/blog/zero-trust-at-gitlab-data-zones-and-authentication-scoring/).\n{: .alert .alert-info .note}\n\nZero Trust is the practice of shifting access control from the perimeter of the org to the individuals, the assets, and the endpoints. For GitLab, Zero Trust means that all devices trying to access an endpoint or asset within our GitLab environment will need to authenticate and be authorized. This is part three of a multi-part series.\n\nCheck out these other posts to get up to speed:\n* Part one: [The evolution of Zero Trust](/blog/evolution-of-zero-trust/)\n* Part two: [Zero Trust at GitLab: Problems, goals, and coming challenges](/blog/zero-trust-at-gitlab-problems-goals-challenges)\n\nOne of the main objectives for the Security team at GitLab is to protect data, regardless of whether it is our customer data or employee data. Instead of simply viewing Zero Trust Networking (ZTN) as some type of solution for authentication, we also look at it as a way to further our data protection. 
This poses specific challenges for both the data and the infrastructure the data resides upon.\n\n### Dealing with data classification\n\nWe’ve established a [classification of data policy](/handbook/security/data-classification-standard.html) at GitLab, so we understand the protections necessary. The emphasis of the data classification policy is to define mapping between access controls and data, where the level of sensitivity of the data can appropriately be protected. To help with the understanding and to allow for quicker identification, the four data classification levels are mapped to a color coding. The color codings are `RED`, `ORANGE`, `YELLOW`, and `GREEN` – with `RED` being the most sensitive data, down to `GREEN` being public data.\n\nThis classification of data is a huge step in the right direction when it comes to handling ZTN. That being said, when it comes to data classification there are a few areas where we anticipate challenges with regards to ZTN:\n\n* **The state of data changes over time.** Data that is in one classification may change over time based upon any number of factors. An example is `ORANGE` sales data for a public company – if disclosed before a certain date this could lead to insider trading. However after a certain date the sales data would become public, or `GREEN` data.\n\n* **Tracking of data/metadata.** The tracking of data and its metadata, including origin and classification that define requirements for handling, is non-trivial. Applying labels (data classification labels, not to be confused with the [labeling done within the GitLab software itself](https://docs.gitlab.com/ee/user/project/labels.html)) to data helps in enabling control of the data. 
These labels can be related to the data’s function as well as conditional access controls needed.\n     * For example, a US DoD instance of GitLab might require certain data labels such as “US citizen,” “on US soil when accessing,” “part of the US DoD project team,” and “GitLab team member not a contractor” in addition to other more standard restrictions. It is notable that the process of data labeling could be beneficial to meeting compliance standards as well, e.g. GDPR article 15 removal requests.\n\n* **Time limits on certain data.** Applying data classification labels to data will require time limits. In the above example, the label is “part of the US DoD project team,” and access to this data may expire after 30 days and need to be removed/re-applied for/auto-extended under certain circumstances, etc.\n\n* **Capability of data.** Departmental data collected might be subject to classification based upon what it is capable of instead of what it actually seems to be (think Tenable scanning data). The same would apply to customer-generated data, such as ZenDesk tickets. Basically, because we cannot control what a customer might say or what a security scan might find. It is possible that someone could have access to a system or even manage parts of that system, yet should not be able to see all of its data.\n\n* **Movement of data.** Departmental data that is transferred between systems, either automated or manual, could affect the classification of itself or the surrounding data, especially if the data is transformed or cleansed in some way. For example, situations and potential security problems reported via ZenDesk or HackerOne are often transferred to GitLab.com so we can “work the issue.” These are often sanitized to a degree. Here is a more detailed example to illustrate this:\n     * If the XYZ corporation reports a problem which is entered into ZenDesk, an issue is created for the Security team to work to resolution, and the data is in essence transformed. 
If the problem is authentication bypass using the APIs and it affects all customers on GitLab.com, only the mechanics of the bypass itself are considered relevant, and the fact that XYZ corporation reported it is not important to the resolution process. Therefore, XYZ corporation can be scrubbed from the Security team’s issue (and should be). As the original issue impacted XYZ corporation, it might have been considered `ORANGE` data impact, but the real impact affects more than one customer, so the problem is considered an impact to `RED` data. After a patch and resolution of the problem, we make the details of the situation public and include vulnerability, patch, and resolution information. We state it was reported to us by “a customer.” Association with XYZ corporation would still be `ORANGE` data. However, the previous `RED` classification of the problem itself is now considered `GREEN` since the problem is resolved and we have made the problem and its solution public.\n\nAs you can see, on the surface there seems to be no problem with securing our data with the assistance of ZTN, but once you start to explore \"edge cases\" one begins to reach the conclusion that these are not actually edge cases, but working examples of how we interact with our data. In most examples, this will not be a problem as we have granular control over our data, but when it comes to ZTN we need to make sure we consider the changing state of our data. The main thing we wish to avoid is an authentication decision being made based upon a particular classification of data on a system when the classification of that data is known to change over time.\n\nGranular data access is typically controlled at the system level, so we should be just fine. A closer look at our infrastructure may indicate otherwise, so a more detailed examination is required.\n\n### The infrastructure\nThe infrastructure needs to be defined, including some semblance of where the data resides and how it is accessed. 
For the systems we directly manage and control down to the very lowest level, we have a good grasp on what we have to work with and what controls are available to regulate access to the data they contain. However, a decent part of our infrastructure resides on systems we do not fully control.\n\nIn the modern cloud age, the rise of [software as a service](https://en.wikipedia.org/wiki/Software_as_a_service) (SaaS) applications has become an important part of everyday business operations. Instead of maintaining servers in a server room, a vendor uses the cloud and makes the application accessible over the internet. Each company has their own private set of data maintained by the SaaS provider, and may have different levels of features based upon price that allow them to manipulate and control the data. Examples include Expensify for handling expenses, BambooHR for handling HR functions, and so on. GitLab is no exception to this process. Deployment is often as easy as setting up accounts, and while we’re [working to unify our authentication process under Okta](/handbook/business-technology/okta/#how-is-gitlab-using-okta), it is still not fully deployed.\n\nAs we are an all-remote company, our infrastructure is all-remote. We do the bulk of our company activity inside the GitLab.com software itself, but we also use roughly two dozen SaaS companies’ offerings as well. There are the usual suspects such as Slack and Zoom, but as mentioned we are currently using Expensify, BambooHR, ZenDesk, and many others.\n\nSimply put, our infrastructure poses some unique challenges:\n\n* **Cloud controls.** We are a GCP organization. Also AWS. And Azure. Did I mention DigitalOcean as well? As one might expect, this can create challenges if one has to use parts of the underlying cloud controls to help with authentication and enforcement of access controls, and software components are being moved from platform to platform. 
Customers don’t notice, but team members handling administrative access might.\n* **Who controls what?** This is not as bad as it sounds, but it is often not 100% clear who has administrative access to different systems. I’d say it is a symptom of a rapidly growing company, but after having experienced the same thing in most companies I’ve worked at, this is a fairly common phenomenon. The problem at GitLab stems from the amount of growth and our own rather unique history: When the company was very small, a single team member might be in control of a piece of infrastructure that slowly scaled up and became huge. Then, if that team member leaves the company, most likely the team member’s department assumes control. Does anyone or everyone have that control now? Does each team member understand all of the data residing in that system? Do they understand that data in relationship to the data classification?\n* **The enforcement of SaaS application privileges.** For systems where we do not have control over the underlying components, enforcing privileges becomes tricky. If a SaaS app has a regular user authentication and the main screen has an “Admin” button to escalate privileges, does our authentication system handle this programmatically?\n\nFortunately we can leverage a number of the [compliance](/handbook/security/security-assurance/security-compliance/) efforts within the company to gain insight into what levels of control we can impose onto each system.\n\n### What's next\n\nIt sure seems like we have a lot of unique challenges! But we do have a huge leg up. For many organizations, the coming of ZTN means the end of the corporate VPN and the falling of huge chunks of the perimeter network. 
[GitLab doesn’t have a corporate VPN](/handbook/security/#why-we-dont-have-a-corporate-vpn) to dismantle, and as we’ve said before [we’re an all-remote company](/company/culture/all-remote/) so there is no perimeter.\n\nWe’ve discussed a lot of challenges, in the next installment of this series we’ll start talking about a few specifics we are designing to help make things easier. If you’re researching, implementing, or considering ZTN, what are the challenges you’re tackling? Tell us in the comments.\n\n*Special shout-out to the entire security team for their input on this blog series.*\n\nPhoto by [Pixabay](https://www.pexels.com/@pixabay) on [Pexels](https://www.pexels.com/photo/red-office-yellow-school-40799/)\n{: .note}\n",[723,681,9],{"slug":925,"featured":6,"template":685},"zero-trust-at-gitlab-the-data-classification-and-infrastructure-challenge","content:en-us:blog:zero-trust-at-gitlab-the-data-classification-and-infrastructure-challenge.yml","Zero Trust At Gitlab The Data Classification And Infrastructure Challenge","en-us/blog/zero-trust-at-gitlab-the-data-classification-and-infrastructure-challenge.yml","en-us/blog/zero-trust-at-gitlab-the-data-classification-and-infrastructure-challenge",{"_path":931,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":932,"content":938,"config":943,"_id":945,"_type":14,"title":946,"_source":16,"_file":947,"_stem":948,"_extension":19},"/en-us/blog/zero-trust-at-gitlab-where-do-we-go-from-here",{"title":933,"description":934,"ogTitle":933,"ogDescription":934,"noIndex":6,"ogImage":935,"ogUrl":936,"ogSiteName":670,"ogType":671,"canonicalUrls":936,"schema":937},"Zero Trust at GitLab: Where do we go from here?","We take a look back at how far we've come in our ZTN implementation, and at the progress we still need to 
make.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679704/Blog/Hero%20Images/puria-berenji-Dyi1K2atCRw-unsplash.jpg","https://about.gitlab.com/blog/zero-trust-at-gitlab-where-do-we-go-from-here","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Zero Trust at GitLab: Where do we go from here?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mark Loveless\"}],\n        \"datePublished\": \"2019-10-15\"\n      }",{"title":933,"description":934,"authors":939,"heroImage":935,"date":940,"body":941,"category":681,"tags":942},[719],"2019-10-15","\n\n*Zero Trust is the practice of shifting access control from the network perimeter to the assets, individuals, and the respective endpoints. For GitLab, Zero Trust means that all users and devices trying to access an endpoint or asset within our GitLab environment will need to authenticate and be authorized. This is part 6 of 6 in our series.*\n* Part one: [The evolution of Zero Trust](/blog/evolution-of-zero-trust/)\n* Part two: [Zero Trust at GitLab: Problems, goals, and coming challenges](/blog/zero-trust-at-gitlab-problems-goals-challenges)\n* Part three: [Zero Trust at GitLab: The data classification and infrastructure challenge](/blog/zero-trust-at-gitlab-the-data-classification-and-infrastructure-challenge/)\n* Part four: [Zero Trust at GitLab: Mitigating challenges with data zones and authentication scoring](/blog/zero-trust-at-gitlab-data-zones-and-authentication-scoring/)\n* Part five: [Zero Trust at GitLab: Implementation challenges](/blog/zero-trust-at-gitlab-implementation-challenges/)\n\nWe've talked pretty openly about forming our ZTN approach and the challenges we expect along the way – as well as the challenges we've already met. 
If there is an area of ZTN that we've not addressed, or if you're interested in diving deeper into the topic, we invite you to join us October 29, 3-4 pm ET for our [Zero Trust Reddit AMA](https://www.reddit.com/r/netsec/comments/d71p1d/were_a_100_remote_cloudnative_company_and_were/) where you can Ask Us Anything!\n\n## Where we are\nI guess it makes sense to talk about where we are at with this whole ZTN thing. In addition to establishing policies for team members (based upon job descriptions and placement in the org chart), we have classified our data and mapped out our environment so we know where all of the parts are. But there are a few items we want to explain with a bit of detail.\n\n### Getting SaaS\n\nUsing [Okta](https://www.okta.com), we have managed to get (as of this writing) 70 of our [SaaS](https://en.wikipedia.org/wiki/Software_as_a_service) apps under some semblance of control. This “control” has varied heavily – some SaaS apps cleanly and seamlessly integrated with Okta, and some were working kinda-sorta-good-enough to call them integrated. The majority of SaaS integrations work fine as they used [SAML](https://en.wikipedia.org/wiki/Security_Assertion_Markup_Language) and easily integrate in minutes. We can provision and deprovision accounts with simple assignments. Departments like People Ops can do provisioning within minutes instead of days. For some of the integrations, we can force the user to go through Okta, and in a few cases where we have sensitive data, we have extra security steps. For example, to access [BambooHR](https://www.bamboohr.com) users have to go through Okta first (and using Multi-Factor Authentication aka MFA) instead of direct access, and they have to perform yet one more MFA-style step of authentication just for BambooHR.\n\nAre there problems with this? Sure. 
Not everything integrates as well as [Greenhouse](https://www.greenhouse.io) or BambooHR, because each SaaS has implemented their own APIs and done their own SAML setup. Some don’t offer consistent interfaces to integrate with, which means that our team members can bypass Okta and go straight into the SaaS app in some cases, and in others they are forced to use Okta. This workflow inconsistency is sometimes frustrating for team members. We’re constantly [updating our team member instructions](/handbook/business-technology/okta/okta-enduser-faq/) on Okta usage and try to communicate it to all team members as best we can, but we are impacting some users’ workflows. For example, if you sign in via Okta, you need to keep that tab open in your browser, otherwise your Okta session will end and you’ll find yourself repeatedly “MFAing” until you’re blue in the face. Many people are not used to working that way, and not having all SaaS apps working exactly the same doesn’t help. But overall, the time savings and security are great gains for ZTN and we are quite happy with the implementation.\n\n### SSH access\nAs I write this, we are getting ready to start the [Okta ASA](https://www.okta.com/products/advanced-server-access) rollout to Staging to give it a good test. Like SaaS, we expect a few hiccups here and there – especially since this is a new product for Okta, [released earlier this year](https://www.okta.com/blog/2019/04/advanced-server-access-and-infrastructure-identity/). And talk about workflow changes – if you thought browser-based application users were picky, command line SSH users are a bizarre bunch indeed. Command line junkies practically have their own religion around workflow and we’re introducing a change to that workflow. Yes, it is a minor change, but it already concerns me. 
Truthfully, because I am one of those oddball Linux users who lives on the command line and I tend to get fairly picky after a couple decades of being able to adjust and customize every aspect of my experience.\n\n### Camo proxy\nThis will seem like a weird one, but mitigating a security issue actually helped us out from a ZTN perspective. There was a security issue reported via our [HackerOne program](/handbook/security/security-engineering/application-security/runbooks/hackerone-process.html) that allowed for malicious users to gather IP addresses from unsuspecting victims via embedded image files. The solution was to use Camo proxy to resolve the [issue](https://gitlab.com/gitlab-org/gitlab-foss/issues/55115). The Camo proxy was widely deployed to ensure all possible links were protected and had the side benefit of ensuring communications going through the proxy were encrypted. Encrypting communications was one of the items we wanted as a part of ZTN and, as it turned out, we’d already done it.\n\n### A sound foundation\nThere are two things we want from our servers and containers and databases. First, we want them buttoned down tight and properly secured. All of these systems have robust controls, and we can perform all kinds of monitoring, but we have to do it at scale. Tightening security controls is especially important if you are using some of the Zero Trust-ish solutions out there to regulate access to these systems. We’re talking about automation of access provisioning, so we want to make sure that minimal access levels required for data stored on systems *remains* minimal access. This means no escalation of privileges due to configuration mistakes or security vulnerabilities. We also want to make sure that all services being offered up by these systems are as secure as possible against compromise, either locally or remotely.\n\nSecond, we want complete visibility into our infrastructure. 
If something goes awry with a vulnerability being disclosed that potentially impacts our systems or a security incident happens, we want to be able to quickly assess the state of the environment, ensure patches are installed, receive alerts based upon custom triggers to help monitor everything, and so on.\n\nWe are using [Tenable](https://www.tenable.com/products/tenable-io) (mainly for assessments) and Uptycs (mainly for monitoring and alerting) in our environment to help with this visibility. Both certainly handle the basics just fine, in fact Tenable has been quite up to the task. We are facing a few challenges with [Uptycs](https://www.uptycs.com) as we’d like to do more than what the product currently offers. This may not sound like traditional ZTN territory, but it is. It does no good to offer up state-of-the-art authentication and authorization to resources that are poorly maintained and monitored. Like everything else in our company, we face issues with scale – our infrastructure needs to grow and managing the security of that infrastructure must also scale well. Right now we can manage the security of our environment just fine. In fact, it is quite strong, but a lot of it relies on manual intervention which has scaling issues. We have a lot of hash marks in the “win” column with Tenable, but as we scale and expand we’re challenged by Uptycs. In the spirit of openness, we’ll keep you posted on how this progresses.\n\n### The log ride\nTo get a grip on all of this activity, we need to be able to grab all the logs, toss them into one place, and make sense out of them. Our goal is two-fold: we need to understand how our system is being used so we can fine-tune it and we need to be able to detect anomalous events that could signify potential breaches. All of our systems put out logs, and we’ve designed systems to monitor those logs. 
It is nice to automate alerts so as odd events occur, we’re immediately notified, and in some cases, issues are automatically opened for further triage. We’ve started down this path with deployment of several technologies, related to the [Logging Working Group](/company/team/structure/working-groups/logging/). We’re in the initial first steps, and we expect that logs generated from the various ZTN implementations will help improve the logging efforts, perhaps even propel it along quicker as we work out the kinks.\n\n## The Budget Issue\nA big ZTN question we get involves budget. After all, one company’s solution may involve a couple of small purchases and a large effort of tweaking and reconfiguring existing technology that is already deployed. Another company might have to make some major investments in new products just to get started. In other words, how do you budget for a solution when you don’t know exactly what that solution will look like?\n\nThis is probably one of those things a lot of organizations do not discuss, at least in any detail outside of “it’s expensive”. The idea of ZTN as a concept is an easy sell to most organizations because the benefits are so great. At the lofty bullet-point level on vendor slides, they often seem completely undeniable. But when you break down a concept into digestible and deployable components, you are often into interesting budget territory. Getting a department to buy into the concept is much easier than getting a department to alter their budget and purchase the XYZ product, deploy it, maintain it, and oh yeah please give the security department all of the logs. Of course this is a slight exaggeration to convey a point, but it is more often on the mark than not. 
We simply couldn’t fully budget for most of this because we didn’t know what we were going to be deploying until we found a particular solution.\n\nIn this case we have to be able to show an [ROI](https://en.wikipedia.org/wiki/Return_on_investment), which means we need to help a department understand the benefits and actually show an improvement to that department’s bottom line. For example, Okta has allowed us to change some onboarding and offboarding processes from days into minutes – and it's a massive timesaver. The push for Okta ASA is because our Infrastructure department saw the gains realized from our Okta rollout, and asked for something similar. Regardless of which department’s budget this could go against, it has to be sold to someone internally. Showing an ROI that clearly states we could financially benefit in one or more areas is really the only way to go about it. Showing the benefits is critical when you are searching for solutions to problems with no idea which solution will work.\n\n## Advice\nSince a lot of people ask for advice on ZTN in general, I’d like to share some impressions from our experience. Here are some major things that really have helped us.\n\n### Break down your needs into simple components\nYou do this by defining the problem end-to-end. For us, we could break it down into user identification and authentication, device identification and authorization, data classification, and policy enforcement. Each part was further broken down into smaller pieces – which includes a lot of what we covered in previous blog posts. This deconstruction helped us understand all of the areas we needed to work with.\n\n### Look at areas of winning\nIf a deployed technology is already solving part of the problem, can it be expanded? If it can’t, why not? Where are the gaps? List those gaps and use them to identify possible solutions during the review. 
We covered this topic in detail in a previous blog post, [ZTN implementation challenges](/blog/zero-trust-at-gitlab-implementation-challenges/).\n\n### Ignore the vendor “spin”\nThere are vendors that sell solutions where they claim to be solving ZTN. In my ancient past, I worked for a company that sold (among other things) system administration tools. One day our boss handed us a list of compliance guidelines for three different standards. We were to go through each bullet item for each standard, point out the system administrative tools and the various system checks in our products that lined up with each bullet item, and write them down. This process took a few days, and by the end of the week each compliance standard had a list of checks. The product team grouped these checks together, and just like that we were a compliance company. Now the product line was actually quite good and robust which made this fairly easy, but the pivot of the company to being compliance-focused took longer for the marketing team to print up flyers than it did for the tech part. Yes, we were incomplete – we weren’t asked to write additional checks, we were asked to just use existing checks. But we literally were ready in less than a week with something we could call compliance.\n\nMy point here is that I often get the feeling that ZTN vendors do the same thing. They looked over their existing product line, figured out what they could even remotely claim as being a part of a “Zero Trust” solution, and overnight became a ZTN solutions provider. Of course, if your own organization’s world view on what ZTN is lines up with a particular vendor, great! Buy it. But, for GitLab, we had to break down what we wanted the various components of our technology and data to do and align them with our own ideas of ZTN, refine our model, and then go find vendors that did extremely specific things. 
For example, we’ve approached Okta with the breakdowns we are trying to solve – and they have products that solve them. For the most part we’ve ignored the whole “ZTN packaged solutions” approach and went after the core of what their products do, and we’re solving our problems as a result.\n\n## Conclusion\nWe’re getting there. We have a lot of wins, and a number of interesting challenges. Every once in a while we will post a new blog to keep you current on our security saga with Zero Trust, and hopefully you can learn from our examples – including our challenges – and help make your systems, data, and users as secure as possible. We hope you’ll follow along and, if you’ve got a ZTN viewpoint to share, we invite you to comment below.\n\n*Special shout-out to the entire security team for their input on this blog series.*\n\nPhoto by [Puria Berenji](https://unsplash.com/@ipuriagram?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText). 
\n{: .note}\n\n",[723,681,9],{"slug":944,"featured":6,"template":685},"zero-trust-at-gitlab-where-do-we-go-from-here","content:en-us:blog:zero-trust-at-gitlab-where-do-we-go-from-here.yml","Zero Trust At Gitlab Where Do We Go From Here","en-us/blog/zero-trust-at-gitlab-where-do-we-go-from-here.yml","en-us/blog/zero-trust-at-gitlab-where-do-we-go-from-here",{"_path":950,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":951,"content":957,"config":965,"_id":967,"_type":14,"title":968,"_source":16,"_file":969,"_stem":970,"_extension":19},"/en-us/blog/better-code-reviews",{"title":952,"description":953,"ogTitle":952,"ogDescription":953,"noIndex":6,"ogImage":954,"ogUrl":955,"ogSiteName":670,"ogType":671,"canonicalUrls":955,"schema":956},"Better Code Reviews GitLab Style","Better Code Reviews - A selection of tools for your tool-belt when it comes to code reviews.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663502/Blog/Hero%20Images/paperclips.jpg","https://about.gitlab.com/blog/better-code-reviews","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Better Code Reviews GitLab Style\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David O'Regan\"}],\n        \"datePublished\": \"2020-06-08\",\n      }",{"title":952,"description":953,"authors":958,"heroImage":954,"date":960,"body":961,"category":962,"tags":963},[959],"David O'Regan","2020-06-08","\n\n{::options parse_block_html=\"true\" /}\n\n\n\n> A love letter to anyone that's ever reviewed or been reviewed.\n\nThis blog post originally started as a thank-you message inside the GitLab slack channel `#thanks`, however, the scope of the message grew to such a degree that I wanted to take it a step further and see if I could not only thank the amazing people this post is dedicated to, but also hopefully share some of the amazing things they taught me to help *you*, dear reader.\n\nI have always been rather 
passionate about feedback. For as long as I can remember, I have always sought feedback on everything I was interested in. It's as true for me in software as it is for my non computer related hobbies like bodybuilding or grammar.....**cough cough**. Feedback is so important for every aspect of life, and in software it is no different. Feedback matters and in GitLab, we deliver most if not all of our feedback to one another via the code review.\n\nThis post is designed to deliver a selection of the most fantastic things I have seen in code reviews here at GitLab, with two goals:\n\n1. Acknowledge the people who work hard to ensure the feedback cycle they provide is as good as it can be, because at GitLab we like to [say thanks](https://handbook.gitlab.com/handbook/values/#say-thanks).\n1. Offer you, the reader, a selection of tools for your toolbelt when it comes to code reviews.\n\nEnter - **Better Code Reviews**.\n\n## Self Reviews - Details Matter\n\n> Before assigning MRs to the reviewer I practice a self-review to help the reviewer and the maintainer understand quirks and caveats of the MR. I am trying to anticipate their concerns/questions. As a maintainer I find it also very valuable. - Peter Leitzen ([@splattael](https://gitlab.com/splattael))\n\nWe often take for granted that details are hard. Moreover, we often take for granted that details in software are even harder. The majority of software consists of layers upon layers of deep abstractions and obscure logic that can be difficult, if not impossible, to really understand without spending a significant amount of time parsing it line by line.\n\nThis process is made even harder when the details or context are incorrect. Though it's natural for this to happen, humans are not spell checkers, nor do the majority of us like to revisit a piece of work a fourth or fifth time to ensure it's as correct as it can be. 
If we all did this, nothing would ever be delivered.\n\nBut - there is a sweet spot to be found for this dilemma in software where we can keep the velocity of delivery high, and also reduce the feedback cycle time through a small amount of dedicated effort to the details. We talk about some of the details [here in the responsibility of the merge request author](https://docs.gitlab.com/ee/development/code_review.html#the-responsibility-of-the-merge-request-author).\n\nFor the merge request author, step through a checklist. Here is mine. If you can't read my chicken-scratch handwriting, I'll type it out too:\n\n![merge-checklist](https://about.gitlab.com/images/blogimages/merge-checklist.png)\n\nBefore every feedback cycle:\n\n- Re-read every line\n- Test your code locally\n- Write a test for every change (or as many as you can)\n- Write a clear description and update it after each feedback cycle\n- Include at least one screenshot per change. More is better\n- Check, re-check and re-check your labels\n- Consider using a `~\"workflow::refinement\"` label for issues ahead of time like we do in the Monitor:Health team\n- Review the code as if you were the reviewer. Be proactive, answer the likely questions, and open followup issues ahead of time\n\nIf you want to see the last and most important part in action, check out one of our frontend maintainers Natalia Tepluhina([@ntepluhina](https://gitlab.com/ntepluhina)) pre-answer a question she knew would be asked in [one of her merge requests](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/33587#note_353564612).\n\n## Conventional Comments - Communicate Intent\n\n>  **Shaming** This is horrible code. How about re-writing all of it so that it stops being that bad? - Frédéric Caplette ([@f_caplette](https://gitlab.com/f_caplette))\n\nOne of the hardest parts of getting a code review right is communicating the human touch. 
When we offer feedback and receive feedback, human habit creates cognitive distortion by defaulting to the most negative aspects of that feedback. At GitLab, we try to highlight that in our [value system](https://handbook.gitlab.com/handbook/values/#assume-positive-intent).\n\nIn the world of psychology, this is called **mental filtering**, and it's something that all humans have a tendency to do. Though in software this affliction can be more common, as working in software goes hand-in-hand with valuing yourself based on how intelligent others think you are.\n\nEnter [conventional comments](https://conventionalcomments.org/) by Paul Slaughter ([@pslaughter](https://gitlab.com/pslaughter)) - a well thought-out system for leaving comments in a useful way for both the reviewer and author of the merge request. It's so popular one amazing person made a [browser extension (chrome, firefox)](https://gitlab.com/conventionalcomments/conventional-comments-button) for it!\n\nSo why does adding a single bolded word to the top of a comment help with the human touch? Well, it's all about intent.\n\nWhen you start the comment with an eye-catching single word that defines the intent and tone for the comment, it gives the reader a chance to understand where your comment is coming from.\n\nLet's try an experiment. If you had submitted code for review, which comment would you prefer to read?\n\nOption one:\n\n```bash\nWhat do you think about X instead?\n```\n\nor option two:\n\n```bash\n**suggestion (non-blocking)**\n\nWhat do you think about X instead?\n```\n\nNow if you're anything like me, you took a preference to option two. It had context, communicated empathy, and was an invitation to try something different rather than a command.\n\nThe magic part of this comment is the first line `**suggestion (non-blocking)**`. Straightaway, before you even read the comment, you know the two most important things about it:\n\n1. It's a suggestion from the reviewer\n1. 
It's non-blocking, communicating it's more of a friendly suggestion than a hard change that's needed\n\nAnother massive advantage this style of commenting has: it allows merge request authors to understand the reviewer is neither trying to block nor act as a gatekeeper for their work. By highlighting what counts as a blocking and a non-blocking comment, merge authors get the full context of what the reviewer is trying to communicate.\n\nTo demonstrate this, let's try another thought experiment! You have submitted a merge request for review and your review comes back with eight comments.\n\n- **Scenario A: No context in comments.** All comments are treated equally because they lack context for what counts as a blocker and what doesn't.\n- **Scenario B:** Context added via conventional comments system.\n\nThe comments can be treated via priority:\n\n1. Blockers => What's needed to get the merge over the line.\n1. Non-blockers => What can be a separate merge or perhaps a discussion.\n\nNext time you're reviewing code, try using conventional comments and watch how it affects not only the way the merge request author feels about the review, but the way **you**, the reviewer, feel leaving the review. My guess is you'll feel a lot better.\n\nWe're currently looking at [integrating this feature directly into GitLab](https://gitlab.com/gitlab-org/gitlab/-/issues/26891) because we believe in making GitLab the best possible place for code reviews, and want you to have the best experience possible.\n\nIf you want to see a real-life example of some of Paul Slaughter's ([@pslaughter](https://gitlab.com/pslaughter)) awesome work using conventional comments, check out [his reviews of my community contributions](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/24897) here at GitLab. That empathy shines through.\n\n## The Patch File\n\n> Here's a patch file to better explain - Denys Mishunov ([@dmishunov](https://gitlab.com/dmishunov))\n\nWanna know a `git` secret? 
Patch files are the stuff of magic. If you want to read about them, [check the Git documentation for patches](https://git-scm.com/docs/git-format-patch).\n\n### How To Make A Patch File\n\nYou can make a patch file via your editor, or via the command line.\n\n#### Via The Editor\n\nRocking a nice fancy IDE or text editor? Most of them support patch files via plugins, or out of the box!\n\n- [VSCode](https://github.com/paragdiwan/vscode-git-patch)\n- [Webstorm](https://www.jetbrains.com/help/webstorm/using-patches.html)\n- [Atom](https://atom.io/packages/git-plus)\n- [Vim](https://vim.fandom.com/wiki/How_to_make_and_submit_a_patch) …life is what happens when you're trying to exit `vim`?\n\n#### Via The CLI\n\nOkay, you’ve made some commits, here’s your `git log`:\n\n```plaintext\ngit log --pretty=oneline -3\n* da33d1k - (feature_branch) Reviewer Commit 1 (7 minutes ago)\n* 66a84ah - (feature_branch) Developer 1 Commit (12 minutes ago)\n* adsc8cd - (REL-0.5.0, origin/master, origin/HEAD, master) Release 13.0 (2 weeks ago)\n```\n\nThis command creates a new file, `reviewer_commit.patch`, with all changes from the reviewer's latest commit against the feature branch:\n\n```plaintext\ngit format-patch HEAD~1 --stdout > reviewer_commit.patch\n```\n\n### Apply The Patch\n\nFirst, take a look at what changes are in the patch. You can do this easily with `git apply`:\n\n```plaintext\ngit apply --stat reviewer_commit.patch\n```\n\nJust a heads up! Despite the name, this command won't actually apply the patch. It will just show the statistics about what the patch will do.\n\nSo now that we've had a look, let's test it first, because not all patches are created equal:\n\n```plaintext\ngit apply --check reviewer_commit.patch\n```\n\nNo errors? Awesome! We can apply this patch without worry.\n\nTo apply the patch, you should use `git am` instead of `git apply`. 
The reason: `git am` allows you to sign off an applied patch with the reviewer's stamp.\n\n```plaintext\ngit am --signoff \u003C reviewer_commit.patch\nApplying: Reviewer Commit 1\n```\n\nNow run `git log` and you can see the `Signed-off-by` tag in the commit message. This tag makes it very easy to understand how this commit ended up in the code base.\n\n### Why to use them in code reviews\n\nSo now that you know how to make a shiny patch file, why would you use patch files as part of a code review process? There are a few reasons you might consider offering a patch file for a change you feel strongly about:\n\n1. It communicates you have invested a large amount of effort into understanding the author's solution and reasoning\n1. It demonstrates passion for reaching the best solution through teamwork\n1. It offers a willingness on the reviewer's part to accept responsibility for this merge past the point of just reading the code\n\nSome people might argue patch files are a cheeky way for a reviewer to force a change they would rather see make it into the code base, but I argue that anyone who has taken the time to check out a branch, run the project, implement a change, and then submit that change back for a discussion is embodying the value of collaboration to the fullest.\n\nWant to see an awesome example of a patch file in action? Check out one of our frontend maintainers Denys Mishunov ([@dmishunov](https://gitlab.com/dmishunov)) in action using a [patch file to its maximum potential](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/31686#note_341534370)!\n\nWe believe so much in creating the best code review experience here at GitLab, we're looking into how we can make this system a [seamless part of the merge request and code review flow](https://gitlab.com/gitlab-org/gitlab/-/issues/220044).\n\n## Fairness\n\n> Fairness is a person's ability to rise above their own prejudice.\n\nFairness is an odd word. 
Chris Voss, a former FBI negotiator, said in his book [Never Split The Difference](https://www.goodreads.com/book/show/26156469-never-split-the-difference) that:\n\n> “Fair”—the most powerful word in any negotiation scenario. To become a great negotiator, you must earn the reputation of being a fair one.\n\nCode reviews can be viewed as a negotiation. It's you and another human being having a negotiation, based upon the idea that at the end, the result of this negotiation should be a selection of code that is both of value and of a high standard. While you might think that FBI negotiations and code reviews have little to do with one another, the concept of being a fair negotiator often can be the most useful tool in your toolbox as both an author and reviewer.\n\nYou can actually see it mentioned twice in the [permissions to play in points 2 and 7](https://handbook.gitlab.com/handbook/values/#permission-to-play) guidelines here at GitLab:\n\n- \"Be dependable, reliable, fair, and respectful.\"\n- \"Seek out ways to be fair to everyone.\"\n\n### Author Fairness\n\nBeing fair as an author is the easier of the two. When you think of being fair as an author you need to adhere to a few simple Do's and Don'ts:\n\nDo:\n- Write a proper description with screenshots (can't stress this one enough)\n- Understand a reviewer's point of view when they make suggestions\n- Pre-address strange parts of your merge (we all have them)\n- Be open to [collaboration](https://handbook.gitlab.com/handbook/values/#collaboration) on your work\n\nDon't:\n- \"plz merge\"\n- Forget to write a description with screenshots\n- Be closed off or take offense to suggestions\n- Forget to include any steps needed to get the build running or in other words (reduce burden where possible!)\n\nHonestly, it's pretty simple to be a fair author of a merge request if you use a small amount of empathy and remember that the person reviewing your code **gets nothing extra** for their time spent reviewing your code. 
They just want to help take your merge to the next level.\n\n### Reviewer Fairness\n\nBeing fair as a reviewer is a tad harder than being fair as an author. But why, I hear you ask? The issue is something called \"bias\" - or [unconscious-bias](https://handbook.gitlab.com/handbook/values/#unconscious-bias), as the handbook defines it.\n\nBias is, for better or for worse, something we all deal with when it comes to how we *want* things to be. We all have our own styles, preferences, and ideas on how software should be written:\n\n> Eslint be damned I want you to use double quotes!\n\nThis creates issues when it comes to code reviews, because it's normal for a lot of your own bias to bleed into a comment. You begin thinking in absolutes and the unresolved discussion count rises.\n\nLet me ask you something. Have you ever reviewed a merge request and found yourself saying things like:\n\n- \"It should be written like this?\"\n- \"Why would they do it like that?\"\n- \"I would have done it *this* way.\"\n- \"That's not how that should be done!\"\n\nWell, my friends, welcome to another common cognitive distortion called \"Should/must statements\". Do you want to be a better code reviewer? The next time you write a comment and it includes the word \"should\" or \"must\", pause right there and really think about why you felt the need to use that word. Sometimes it will be fair and warranted - such as if your company follows a set of coding conventions like we do at GitLab - but stay vigilant for when those statements are a thin veil for a personal preference. Ask yourself if you're being fair with your review. As a reviewer, if you find yourself in need of using a should/must statement, be sure to supply a reference to supporting documentation that is driving your statement.\n\nOne lesson I have learned through my own experience is that there is almost always a reason for something to be done the way it is. 
The fair response to something you don't agree with is to ask *why* it's being done like that, not saying it *must* be another way. That is how you become a fair and great negotiator.\n\n## The Follow Up\n\n> I feel like the follow up issue should become a first class citizen. - Sarah Yasonik ([@syasonik](https://gitlab.com/syasonik))\n\nLong merges suck. They just do. And while the concept of \"big doesn't always mean good\" might have started with food, it bleeds into the world of software development through merge requests that are too big. They also directly conflict with one of our [main values](https://handbook.gitlab.com/handbook/values/#make-small-merge-requests) of iteration. In GitLab, we take this so seriously that [Danger Bot](https://docs.gitlab.com/ee/development/dangerbot.html) will ask you to break down merges that are over a certain size, helping developers champion the [value of iteration](https://handbook.gitlab.com/handbook/values/#iteration).\n\nLarge merge requests create huge amounts of complexity, they're hard to test, they're hard to reason about, they're hard to maintain or extend.....and that's just for the author!\n\nSo what's worse than a large merge request? Reviewing a large merge request. If you've ever been pinged to review a merge request that was longer than 1000 lines, you understand what I am talking about. If it hasn't happened to you yet, count your lucky stars that your teammates live and breathe some good habits like simple solutions and iteration, and value a lack of complexity.\n\nThis creates a bigger problem than a complex reading exercise for the reviewer: it creates a context block. When a review grows past a certain amount of lines, it simply becomes too difficult to reason about without checking out the branch, booting the project and smoke testing. 
While smoke testing complex reviews is a great idea, it should **not** become the default ideal for reviewing.\n\nIf the merge request is too long, the code review is too complex / too long. The code rots, your merge conflicts grow, you can't iterate, you're constantly addressing breaking conflicts … and you're stuck for days, maybe weeks, maybe forever.\n\nSo how do we fix this? In the Monitor:Health team's iteration retrospective, my teammate Sarah Yasonik ([@syasonik](https://gitlab.com/syasonik)) raised a point where she suggested the follow up issue / merge become a first class citizen. I thought she was onto something amazing. If your merge is too long, or your reviews are taking too long, break your merge down, keep the reviews small, and offer follow-ups.\n\nTreat the follow-up merge as a first-class citizen. Do it right there *while reading the reviewer's feedback* instead of adding more code to an already too big merge! Do **not** make an already bloated merge even worse by adding more scope. Divide and conquer where possible.\n\nI think a lot of developers and reviewers find this process difficult because it's a contract of faith: \n\n- I, the author, promise to deliver a follow-up.\n- I, the reviewer, put myself on the line by taking your word that you will in fact fix this issue later.\n\nIt's scary, and lacks polish. I get it, but you should never let tomorrow's perfect stop today's progress because - spoiler alert - tomorrow isn't here, and we really only have today.\n\n### The Author Follow Up\n\nIf you offer a follow up, deliver it. It's your only rule but you cannot break it. Your credit for wielding the follow up resides solely in your consistent ability to deliver on your promises over time. 
As an author you should also work with your PMs and EMs to help prioritize the follow up as part of a wider team effort.\n\n### The Reviewer Follow Up\n\n- If you are offered a follow up, accept it with grace and trust the developer to make good on their promise.\n- Be open to suggesting follow ups as part of your review.\n- Be patient with people.\n- Allow for wiggle room, but know when to say no. (A follow-up for a non-critical issue is fine, but not a blatant break that won't add more lines or context.)\n\n## The Art Of The GIF\n\n>  If a Picture Speaks 1,000 Words an animated GIF Speaks 100,000.\n\nWhile this is the least technical aspect of the entire post, it is perhaps the most interesting and the easiest to implement.\n\nDid you know that 93% of communication is nonverbal? I didn't, until I started seeing GIFs in code reviews. When I began to see them pop up in reviews, they deeply caught my attention, and I began to wonder why they had such a lasting impression on me as a developer.\n\nWords are powerful, but images are particularly powerful because of their ties to our emotions. Images have the potential to create powerful emotional responses, and when you see something that sparks a sense of positivity, it sets the tone for the entire review. You **understand fully** that the reviewer really cares and wants to communicate that care non-verbally.\n\nSo how do you use GIFs in your merge requests and code reviews? Well, you could [start with our handbook instructions](/handbook/product/making-gifs/), but the short and sweet version:\n\n1. Use a screen recorder to capture the video you want to show as a GIF.\n2. Grab yourself a copy of [gifify](https://github.com/vvo/gifify).\n3. GIF all day long!\n\n### GIFs That Show You Care\n\nI won't ever forget the first time I saw a [funny GIF in a code review](https://gitlab.com/gitlab-org/gitlab-ui/-/merge_requests/1193#note_307290889). 
I never even made it to reading the comment, because all I could comprehend was this animated GIF of a thumbs-up and I remember thinking: *This merge would pass review. It would all be okay.* The sheer childlike giddy nature of seeing this image in action made me smile ear-to-ear. Every other comment could have been a rant about how awful my code was, but I wouldn't have cared.\n\nIf I can give you one piece of advice for your code reviews as a reviewer, use GIFs in a light-hearted way, because they are:\n\n- empathy-laden\n- soften the blow of a hard topic\n- foster positivity\n- make code reviews fun!\n\n![teamwork](https://media.giphy.com/media/vcHTRiZOglHNu/giphy.gif)\n\nWe're currently looking at making [Giphy an integrated feature here at GitLab](https://gitlab.com/gitlab-org/gitlab/-/issues/17379), making your code reviews even easier and more fun!\n\n## Tactical code reviews through the value of iteration\n\n> Can we make this smaller? - Clement Ho([@ClemMakesApps](https://gitlab.com/ClemMakesApps))\n\nOne thing I have noticed that helps time and time again for better code reviews is the idea of breaking down a merge request into the smallest piece possible. A few people in my time here at GitLab have really put this across as a valuable way of working, but it was my frontend engineering manager Clement Ho([@ClemMakesApps](https://gitlab.com/ClemMakesApps)) that I really took notice championing this ideal. 
Given that, I started paying close attention to this idea and began to notice benefits almost immediately when implementing the idea.\n\nIf we look at the GitLab value handbook's [suggestions iteration competency](https://handbook.gitlab.com/handbook/values/#iteration-competency) you can see the value in small, digestible merge requests, which translates into smaller code reviews:\n\n| Level | Demonstrates Iteration Competency by… |\n|","unfiltered",[964,9,534],"code review",{"slug":966,"featured":6,"template":685},"better-code-reviews","content:en-us:blog:better-code-reviews.yml","Better Code Reviews","en-us/blog/better-code-reviews.yml","en-us/blog/better-code-reviews",2,[663,690,709,730,751,773,793,812,833],1753475285958]